1 /* This file is part of the Emulex RoCE Device Driver for
2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * EMULEX and SLI are trademarks of Emulex.
7 * This software is available to you under a choice of one of two licenses.
8 * You may choose to be licensed under the terms of the GNU General Public
9 * License (GPL) Version 2, available from the file COPYING in the main
10 * directory of this source tree, or the BSD license below:
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
16 * - Redistributions of source code must retain the above copyright notice,
17 * this list of conditions and the following disclaimer.
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Contact Information:
36 * linux-drivers@emulex.com
40 * Costa Mesa, CA 92626
 */
43 #include <linux/dma-mapping.h>
44 #include <rdma/ib_verbs.h>
45 #include <rdma/ib_user_verbs.h>
46 #include <rdma/iw_cm.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_cache.h>
50 #include <rdma/uverbs_ioctl.h>
53 #include "ocrdma_hw.h"
54 #include "ocrdma_verbs.h"
55 #include <rdma/ocrdma-abi.h>
57 int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
66 int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
69 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
71 if (uhw->inlen || uhw->outlen)
74 memset(attr, 0, sizeof *attr);
75 memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
76 min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
77 ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
78 attr->max_mr_size = dev->attr.max_mr_size;
79 attr->page_size_cap = 0xffff000;
80 attr->vendor_id = dev->nic_info.pdev->vendor;
81 attr->vendor_part_id = dev->nic_info.pdev->device;
82 attr->hw_ver = dev->asic_id;
83 attr->max_qp = dev->attr.max_qp;
84 attr->max_ah = OCRDMA_MAX_AH;
85 attr->max_qp_wr = dev->attr.max_wqe;
87 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
88 IB_DEVICE_RC_RNR_NAK_GEN |
89 IB_DEVICE_SHUTDOWN_PORT |
90 IB_DEVICE_SYS_IMAGE_GUID |
91 IB_DEVICE_LOCAL_DMA_LKEY |
92 IB_DEVICE_MEM_MGT_EXTENSIONS;
93 attr->max_send_sge = dev->attr.max_send_sge;
94 attr->max_recv_sge = dev->attr.max_recv_sge;
95 attr->max_sge_rd = dev->attr.max_rdma_sge;
96 attr->max_cq = dev->attr.max_cq;
97 attr->max_cqe = dev->attr.max_cqe;
98 attr->max_mr = dev->attr.max_mr;
99 attr->max_mw = dev->attr.max_mw;
100 attr->max_pd = dev->attr.max_pd;
101 attr->atomic_cap = 0;
103 attr->max_map_per_fmr = 0;
104 attr->max_qp_rd_atom =
105 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
106 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
107 attr->max_srq = dev->attr.max_srq;
108 attr->max_srq_sge = dev->attr.max_srq_sge;
109 attr->max_srq_wr = dev->attr.max_rqe;
110 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
111 attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
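/* Translate the physical link speed reported by the firmware into IB
 * speed and width values; unknown or zero speeds fall back to SDR x1.
 */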
116 static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
117 u8 *ib_speed, u8 *ib_width)
122 status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
124 speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
127 case OCRDMA_PHYS_LINK_SPEED_1GBPS:
128 *ib_speed = IB_SPEED_SDR;
129 *ib_width = IB_WIDTH_1X;
132 case OCRDMA_PHYS_LINK_SPEED_10GBPS:
133 *ib_speed = IB_SPEED_QDR;
134 *ib_width = IB_WIDTH_1X;
137 case OCRDMA_PHYS_LINK_SPEED_20GBPS:
138 *ib_speed = IB_SPEED_DDR;
139 *ib_width = IB_WIDTH_4X;
142 case OCRDMA_PHYS_LINK_SPEED_40GBPS:
143 *ib_speed = IB_SPEED_QDR;
144 *ib_width = IB_WIDTH_4X;
149 *ib_speed = IB_SPEED_SDR;
150 *ib_width = IB_WIDTH_1X;
154 int ocrdma_query_port(struct ib_device *ibdev,
155 u8 port, struct ib_port_attr *props)
157 enum ib_port_state port_state;
158 struct ocrdma_dev *dev;
159 struct net_device *netdev;
161 /* props is zeroed by the caller; avoid zeroing it here */
162 dev = get_ocrdma_dev(ibdev);
163 netdev = dev->nic_info.netdev;
164 if (netif_running(netdev) && netif_oper_up(netdev)) {
165 port_state = IB_PORT_ACTIVE;
166 props->phys_state = 5; /* link up */
168 port_state = IB_PORT_DOWN;
169 props->phys_state = 3; /* disabled */
171 props->max_mtu = IB_MTU_4096;
172 props->active_mtu = iboe_get_mtu(netdev->mtu);
177 props->state = port_state;
178 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
179 IB_PORT_DEVICE_MGMT_SUP |
180 IB_PORT_VENDOR_CLASS_SUP;
181 props->ip_gids = true;
182 props->gid_tbl_len = OCRDMA_MAX_SGID;
183 props->pkey_tbl_len = 1;
184 props->bad_pkey_cntr = 0;
185 props->qkey_viol_cntr = 0;
186 get_link_speed_and_width(dev, &props->active_speed,
187 &props->active_width);
188 props->max_msg_sz = 0x80000000;
189 props->max_vl_num = 4;
193 int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
194 struct ib_port_modify *props)
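/* Remember a physical address range in the ucontext's mmap list so that
 * a later ocrdma_mmap() call against it can be validated.
 */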
199 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
202 struct ocrdma_mm *mm;
204 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
207 mm->key.phy_addr = phy_addr;
209 INIT_LIST_HEAD(&mm->entry);
211 mutex_lock(&uctx->mm_list_lock);
212 list_add_tail(&mm->entry, &uctx->mm_head);
213 mutex_unlock(&uctx->mm_list_lock);
217 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
220 struct ocrdma_mm *mm, *tmp;
222 mutex_lock(&uctx->mm_list_lock);
223 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
224 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
227 list_del(&mm->entry);
231 mutex_unlock(&uctx->mm_list_lock);
234 static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
238 struct ocrdma_mm *mm;
240 mutex_lock(&uctx->mm_list_lock);
241 list_for_each_entry(mm, &uctx->mm_head, entry) {
242 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
248 mutex_unlock(&uctx->mm_list_lock);
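/* Allocate a PD index from the preallocated DPP or normal PD bitmap and
 * update the usage counters; called with dev->dev_lock held.
 */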
253 static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
255 u16 pd_bitmap_idx = 0;
256 const unsigned long *pd_bitmap;
259 pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
260 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
261 dev->pd_mgr->max_dpp_pd);
262 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
263 dev->pd_mgr->pd_dpp_count++;
264 if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
265 dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
267 pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
268 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
269 dev->pd_mgr->max_normal_pd);
270 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
271 dev->pd_mgr->pd_norm_count++;
272 if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
273 dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
275 return pd_bitmap_idx;
278 static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
284 pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
285 dev->pd_mgr->pd_norm_count;
290 pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
291 if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
294 __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
295 dev->pd_mgr->pd_dpp_count--;
298 pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
299 if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
302 __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
303 dev->pd_mgr->pd_norm_count--;
310 static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
315 mutex_lock(&dev->dev_lock);
316 status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
317 mutex_unlock(&dev->dev_lock);
321 static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
326 mutex_lock(&dev->dev_lock);
327 if (pd->dpp_enabled) {
328 /* try allocating DPP PD, if not available then normal PD */
329 if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
330 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
331 pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
332 pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
333 } else if (dev->pd_mgr->pd_norm_count <
334 dev->pd_mgr->max_normal_pd) {
335 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
336 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
337 pd->dpp_enabled = false;
342 if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
343 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
344 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
349 mutex_unlock(&dev->dev_lock);
/*
356 * ocrdma_ucontext must be used here because this function is also
357 * called from ocrdma_alloc_ucontext, where ib_udata does not have a
358 * valid ib_ucontext pointer. ib_uverbs_get_context does not call the
359 * uobj_{alloc|get_xxx} helpers which are used to store the
360 * ib_ucontext in the uverbs_attr_bundle wrapping the ib_udata. So
361 * ib_udata does NOT imply a valid ib_ucontext here!
 */
363 static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
364 struct ocrdma_ucontext *uctx,
365 struct ib_udata *udata)
369 if (udata && uctx && dev->attr.max_dpp_pds) {
371 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
373 pd->dpp_enabled ? (dev->nic_info.db_page_size /
374 dev->attr.wqe_size) : 0;
377 if (dev->pd_mgr->pd_prealloc_valid)
378 return ocrdma_get_pd_num(dev, pd);
381 status = ocrdma_mbx_alloc_pd(dev, pd);
383 if (pd->dpp_enabled) {
384 pd->dpp_enabled = false;
394 static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
395 struct ocrdma_pd *pd)
397 return (uctx->cntxt_pd == pd);
400 static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
401 struct ocrdma_pd *pd)
403 if (dev->pd_mgr->pd_prealloc_valid)
404 ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
406 ocrdma_mbx_dealloc_pd(dev, pd);
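/* Allocate the PD that is implicitly bound to a user context; it is handed
 * out to user space on demand through ocrdma_get_ucontext_pd().
 */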
409 static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
410 struct ocrdma_ucontext *uctx,
411 struct ib_udata *udata)
413 struct ib_device *ibdev = &dev->ibdev;
417 pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
422 uctx->cntxt_pd = get_ocrdma_pd(pd);
424 status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
426 kfree(uctx->cntxt_pd);
430 uctx->cntxt_pd->uctx = uctx;
431 uctx->cntxt_pd->ibpd.device = &dev->ibdev;
436 static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
438 struct ocrdma_pd *pd = uctx->cntxt_pd;
439 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
441 if (uctx->pd_in_use) {
442 pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
443 __func__, dev->id, pd->id);
445 kfree(uctx->cntxt_pd);
446 uctx->cntxt_pd = NULL;
447 _ocrdma_dealloc_pd(dev, pd);
450 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
452 struct ocrdma_pd *pd = NULL;
454 mutex_lock(&uctx->mm_list_lock);
455 if (!uctx->pd_in_use) {
456 uctx->pd_in_use = true;
459 mutex_unlock(&uctx->mm_list_lock);
464 static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
466 mutex_lock(&uctx->mm_list_lock);
467 uctx->pd_in_use = false;
468 mutex_unlock(&uctx->mm_list_lock);
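/* Set up a new user context: allocate the DMA-coherent AH table, register
 * it for mmap, create the context PD and return the device parameters to
 * user space in the alloc_ucontext response.
 */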
471 int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
473 struct ib_device *ibdev = uctx->device;
475 struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
476 struct ocrdma_alloc_ucontext_resp resp = {};
477 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
478 struct pci_dev *pdev = dev->nic_info.pdev;
479 u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
483 INIT_LIST_HEAD(&ctx->mm_head);
484 mutex_init(&ctx->mm_list_lock);
486 ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
487 &ctx->ah_tbl.pa, GFP_KERNEL);
491 ctx->ah_tbl.len = map_len;
493 resp.ah_tbl_len = ctx->ah_tbl.len;
494 resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
496 status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
500 status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
504 resp.dev_id = dev->id;
505 resp.max_inline_data = dev->attr.max_inline_data;
506 resp.wqe_size = dev->attr.wqe_size;
507 resp.rqe_size = dev->attr.rqe_size;
508 resp.dpp_wqe_size = dev->attr.wqe_size;
510 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
511 status = ib_copy_to_udata(udata, &resp, sizeof(resp));
517 ocrdma_dealloc_ucontext_pd(ctx);
519 ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
521 dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
526 void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
528 struct ocrdma_mm *mm, *tmp;
529 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
530 struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
531 struct pci_dev *pdev = dev->nic_info.pdev;
533 ocrdma_dealloc_ucontext_pd(uctx);
535 ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
536 dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
539 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
540 list_del(&mm->entry);
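/* Map doorbell, DPP or queue memory into user space. The requested range
 * must have been registered via ocrdma_add_mmap(); doorbell pages are
 * mapped non-cached and DPP pages write-combined.
 */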
545 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
547 struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
548 struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
549 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
550 u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
551 unsigned long len = (vma->vm_end - vma->vm_start);
555 if (vma->vm_start & (PAGE_SIZE - 1))
557 found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
561 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
562 dev->nic_info.db_total_size)) &&
563 (len <= dev->nic_info.db_page_size)) {
564 if (vma->vm_flags & VM_READ)
567 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
568 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
569 len, vma->vm_page_prot);
570 } else if (dev->nic_info.dpp_unmapped_len &&
571 (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
572 (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
573 dev->nic_info.dpp_unmapped_len)) &&
574 (len <= dev->nic_info.dpp_unmapped_len)) {
575 if (vma->vm_flags & VM_READ)
578 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
579 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
580 len, vma->vm_page_prot);
582 status = remap_pfn_range(vma, vma->vm_start,
583 vma->vm_pgoff, len, vma->vm_page_prot);
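/* Fill the alloc_pd response with the doorbell page (and the DPP page when
 * DPP is enabled) and register those ranges for mmap.
 */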
588 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
589 struct ib_udata *udata)
593 u64 dpp_page_addr = 0;
595 struct ocrdma_alloc_pd_uresp rsp;
596 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
597 udata, struct ocrdma_ucontext, ibucontext);
599 memset(&rsp, 0, sizeof(rsp));
601 rsp.dpp_enabled = pd->dpp_enabled;
602 db_page_addr = ocrdma_get_db_addr(dev, pd->id);
603 db_page_size = dev->nic_info.db_page_size;
605 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
609 if (pd->dpp_enabled) {
610 dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
611 (pd->id * PAGE_SIZE);
612 status = ocrdma_add_mmap(uctx, dpp_page_addr,
616 rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
617 rsp.dpp_page_addr_lo = dpp_page_addr;
620 status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
629 ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
631 ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
635 int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
637 struct ib_device *ibdev = ibpd->device;
638 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
639 struct ocrdma_pd *pd;
641 u8 is_uctx_pd = false;
642 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
643 udata, struct ocrdma_ucontext, ibucontext);
646 pd = ocrdma_get_ucontext_pd(uctx);
653 pd = get_ocrdma_pd(ibpd);
654 status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
660 status = ocrdma_copy_pd_uresp(dev, pd, udata);
668 ocrdma_release_ucontext_pd(uctx);
670 _ocrdma_dealloc_pd(dev, pd);
675 void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
677 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
678 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
679 struct ocrdma_ucontext *uctx = NULL;
684 u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
685 (pd->id * PAGE_SIZE);
687 ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
688 usr_db = ocrdma_get_db_addr(dev, pd->id);
689 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
691 if (is_ucontext_pd(uctx, pd)) {
692 ocrdma_release_ucontext_pd(uctx);
696 _ocrdma_dealloc_pd(dev, pd);
699 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
700 u32 pdid, int acc, u32 num_pbls, u32 addr_check)
705 mr->hwmr.local_rd = 1;
706 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
707 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
708 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
709 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
710 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
711 mr->hwmr.num_pbls = num_pbls;
713 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
717 mr->ibmr.lkey = mr->hwmr.lkey;
718 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
719 mr->ibmr.rkey = mr->hwmr.lkey;
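/* Register an MR without a page list and with address checking disabled,
 * giving local DMA access through the returned lkey.
 */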
723 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
726 struct ocrdma_mr *mr;
727 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
728 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
730 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
731 pr_err("%s err, invalid access rights\n", __func__);
732 return ERR_PTR(-EINVAL);
735 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
737 return ERR_PTR(-ENOMEM);
739 status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
740 OCRDMA_ADDR_CHECK_DISABLE);
743 return ERR_PTR(status);
749 static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
750 struct ocrdma_hw_mr *mr)
752 struct pci_dev *pdev = dev->nic_info.pdev;
756 for (i = 0; i < mr->num_pbls; i++) {
757 if (!mr->pbl_table[i].va)
759 dma_free_coherent(&pdev->dev, mr->pbl_size,
761 mr->pbl_table[i].pa);
763 kfree(mr->pbl_table);
764 mr->pbl_table = NULL;
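/* Pick the smallest PBL size that can describe num_pbes page entries within
 * the device's PBL count limit and record the resulting geometry in the MR.
 */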
768 static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
777 pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
778 if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
782 num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
783 num_pbls = num_pbls / (pbl_size / sizeof(u64));
785 } while (num_pbls >= dev->attr.max_num_mr_pbl);
787 mr->hwmr.num_pbes = num_pbes;
788 mr->hwmr.num_pbls = num_pbls;
789 mr->hwmr.pbl_size = pbl_size;
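/* Allocate a DMA-coherent buffer for every PBL in the MR's table; on
 * failure, release whatever was already allocated.
 */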
793 static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
797 u32 dma_len = mr->pbl_size;
798 struct pci_dev *pdev = dev->nic_info.pdev;
802 mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
808 for (i = 0; i < mr->num_pbls; i++) {
809 va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
811 ocrdma_free_mr_pbl_tbl(dev, mr);
815 mr->pbl_table[i].va = va;
816 mr->pbl_table[i].pa = pa;
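/* Walk the umem's DMA-mapped pages and write a page address entry (PBE)
 * for each one into the PBL table, advancing to the next PBL whenever the
 * current one fills up.
 */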
821 static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
824 struct ocrdma_pbe *pbe;
825 struct sg_dma_page_iter sg_iter;
826 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
827 struct ib_umem *umem = mr->umem;
828 int pbe_cnt, total_num_pbes = 0;
831 if (!mr->hwmr.num_pbes)
834 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
837 for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
838 /* store the page address in pbe */
839 pg_addr = sg_page_iter_dma_address(&sg_iter);
840 pbe->pa_lo = cpu_to_le32(pg_addr);
841 pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
846 /* if done building pbes, issue the mbx cmd. */
847 if (total_num_pbes == num_pbes)
850 /* if the given pbl is full storing the pbes, move to the next pbl. */
853 if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
855 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
861 struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
862 u64 usr_addr, int acc, struct ib_udata *udata)
864 int status = -ENOMEM;
865 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
866 struct ocrdma_mr *mr;
867 struct ocrdma_pd *pd;
870 pd = get_ocrdma_pd(ibpd);
872 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
873 return ERR_PTR(-EINVAL);
875 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
877 return ERR_PTR(status);
878 mr->umem = ib_umem_get(udata, start, len, acc, 0);
879 if (IS_ERR(mr->umem)) {
883 num_pbes = ib_umem_page_count(mr->umem);
884 status = ocrdma_get_pbl_info(dev, mr, num_pbes);
888 mr->hwmr.pbe_size = PAGE_SIZE;
889 mr->hwmr.fbo = ib_umem_offset(mr->umem);
890 mr->hwmr.va = usr_addr;
892 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
893 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
894 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
895 mr->hwmr.local_rd = 1;
896 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
897 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
900 build_user_pbes(dev, mr, num_pbes);
901 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
904 mr->ibmr.lkey = mr->hwmr.lkey;
905 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
906 mr->ibmr.rkey = mr->hwmr.lkey;
911 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
914 return ERR_PTR(status);
917 int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
919 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
920 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
922 (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
925 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
927 /* it could be user registered memory. */
929 ib_umem_release(mr->umem);
932 /* Don't stop cleanup, in case FW is unresponsive */
933 if (dev->mqe_ctx.fw_error_state) {
934 pr_err("%s(%d) fw not responding.\n",
940 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
941 struct ib_udata *udata)
944 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
945 udata, struct ocrdma_ucontext, ibucontext);
946 struct ocrdma_create_cq_uresp uresp;
948 /* this must be user flow! */
952 memset(&uresp, 0, sizeof(uresp));
953 uresp.cq_id = cq->id;
954 uresp.page_size = PAGE_ALIGN(cq->len);
956 uresp.max_hw_cqe = cq->max_hw_cqe;
957 uresp.page_addr[0] = virt_to_phys(cq->va);
958 uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
959 uresp.db_page_size = dev->nic_info.db_page_size;
960 uresp.phase_change = cq->phase_change ? 1 : 0;
961 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
963 pr_err("%s(%d) copy error cqid=0x%x.\n",
964 __func__, dev->id, cq->id);
967 status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
970 status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
972 ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
980 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
981 const struct ib_cq_init_attr *attr,
982 struct ib_udata *udata)
984 int entries = attr->cqe;
985 struct ocrdma_cq *cq;
986 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
987 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
988 udata, struct ocrdma_ucontext, ibucontext);
991 struct ocrdma_create_cq_ureq ureq;
994 return ERR_PTR(-EINVAL);
997 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
998 return ERR_PTR(-EFAULT);
1001 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
1003 return ERR_PTR(-ENOMEM);
1005 spin_lock_init(&cq->cq_lock);
1006 spin_lock_init(&cq->comp_handler_lock);
1007 INIT_LIST_HEAD(&cq->sq_head);
1008 INIT_LIST_HEAD(&cq->rq_head);
1011 pd_id = uctx->cntxt_pd->id;
1013 status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
1016 return ERR_PTR(status);
1019 status = ocrdma_copy_cq_uresp(dev, cq, udata);
1023 cq->phase = OCRDMA_CQE_VALID;
1024 dev->cq_tbl[cq->id] = cq;
1028 ocrdma_mbx_destroy_cq(dev, cq);
1030 return ERR_PTR(status);
1033 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
1034 struct ib_udata *udata)
1037 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1039 if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
1043 ibcq->cqe = new_cnt;
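/* Consume any CQEs still valid in the hardware CQ and ring the CQ doorbell
 * with the count so the entries are released; used while destroying the CQ.
 */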
1047 static void ocrdma_flush_cq(struct ocrdma_cq *cq)
1050 int valid_count = 0;
1051 unsigned long flags;
1053 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
1054 struct ocrdma_cqe *cqe = NULL;
1057 cqe_cnt = cq->cqe_cnt;
1059 /* Last irq might have scheduled a polling thread;
1060 * sync up with it before hard flushing.
 */
1062 spin_lock_irqsave(&cq->cq_lock, flags);
1064 if (is_cqe_valid(cq, cqe))
1069 ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
1070 spin_unlock_irqrestore(&cq->cq_lock, flags);
1073 int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1075 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1076 struct ocrdma_eq *eq = NULL;
1077 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
1081 dev->cq_tbl[cq->id] = NULL;
1082 indx = ocrdma_get_eq_table_index(dev, cq->eqn);
1083 BUG_ON(indx == -EINVAL);
1085 eq = &dev->eq_tbl[indx];
1086 irq = ocrdma_get_irq(dev, eq);
1087 synchronize_irq(irq);
1088 ocrdma_flush_cq(cq);
1090 (void)ocrdma_mbx_destroy_cq(dev, cq);
1092 pdid = cq->ucontext->cntxt_pd->id;
1093 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
1094 PAGE_ALIGN(cq->len));
1095 ocrdma_del_mmap(cq->ucontext,
1096 ocrdma_get_db_addr(dev, pdid),
1097 dev->nic_info.db_page_size);
1104 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1106 int status = -EINVAL;
1108 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
1109 dev->qp_tbl[qp->id] = qp;
1115 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1117 dev->qp_tbl[qp->id] = NULL;
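/* Validate the QP create attributes (type, WR/SGE/inline limits) against
 * the device capabilities and enforce the GSI QP restrictions.
 */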
1120 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
1121 struct ib_qp_init_attr *attrs,
1122 struct ib_udata *udata)
1124 if ((attrs->qp_type != IB_QPT_GSI) &&
1125 (attrs->qp_type != IB_QPT_RC) &&
1126 (attrs->qp_type != IB_QPT_UC) &&
1127 (attrs->qp_type != IB_QPT_UD)) {
1128 pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1129 __func__, dev->id, attrs->qp_type);
1132 /* Skip the check for QP1 to support CM size of 128 */
1133 if ((attrs->qp_type != IB_QPT_GSI) &&
1134 (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
1135 pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
1136 __func__, dev->id, attrs->cap.max_send_wr);
1137 pr_err("%s(%d) supported send_wr=0x%x\n",
1138 __func__, dev->id, dev->attr.max_wqe);
1141 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
1142 pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
1143 __func__, dev->id, attrs->cap.max_recv_wr);
1144 pr_err("%s(%d) supported recv_wr=0x%x\n",
1145 __func__, dev->id, dev->attr.max_rqe);
1148 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
1149 pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
1150 __func__, dev->id, attrs->cap.max_inline_data);
1151 pr_err("%s(%d) supported inline data size=0x%x\n",
1152 __func__, dev->id, dev->attr.max_inline_data);
1155 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
1156 pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
1157 __func__, dev->id, attrs->cap.max_send_sge);
1158 pr_err("%s(%d) supported send_sge=0x%x\n",
1159 __func__, dev->id, dev->attr.max_send_sge);
1162 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
1163 pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
1164 __func__, dev->id, attrs->cap.max_recv_sge);
1165 pr_err("%s(%d) supported recv_sge=0x%x\n",
1166 __func__, dev->id, dev->attr.max_recv_sge);
1169 /* unprivileged user space cannot create special QP */
1170 if (udata && attrs->qp_type == IB_QPT_GSI) {
1172 ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
1173 __func__, dev->id, attrs->qp_type);
1176 /* allow creating only one GSI type of QP */
1177 if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
1178 pr_err("%s(%d) GSI special QPs already created.\n",
1182 /* verify consumer QPs are not trying to use GSI QP's CQ */
1183 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
1184 if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
1185 (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
1186 pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
1194 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1195 struct ib_udata *udata, int dpp_offset,
1196 int dpp_credit_lmt, int srq)
1200 struct ocrdma_create_qp_uresp uresp;
1201 struct ocrdma_pd *pd = qp->pd;
1202 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1204 memset(&uresp, 0, sizeof(uresp));
1205 usr_db = dev->nic_info.unmapped_db +
1206 (pd->id * dev->nic_info.db_page_size);
1207 uresp.qp_id = qp->id;
1208 uresp.sq_dbid = qp->sq.dbid;
1209 uresp.num_sq_pages = 1;
1210 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
1211 uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
1212 uresp.num_wqe_allocated = qp->sq.max_cnt;
1214 uresp.rq_dbid = qp->rq.dbid;
1215 uresp.num_rq_pages = 1;
1216 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
1217 uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
1218 uresp.num_rqe_allocated = qp->rq.max_cnt;
1220 uresp.db_page_addr = usr_db;
1221 uresp.db_page_size = dev->nic_info.db_page_size;
1222 uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
1223 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1224 uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
1226 if (qp->dpp_enabled) {
1227 uresp.dpp_credit = dpp_credit_lmt;
1228 uresp.dpp_offset = dpp_offset;
1230 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1232 pr_err("%s(%d) user copy error.\n", __func__, dev->id);
1235 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
1236 uresp.sq_page_size);
1241 status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
1242 uresp.rq_page_size);
1248 ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
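/* Compute the kernel virtual doorbell addresses for this QP's SQ and RQ;
 * the offsets within the PD's doorbell page differ by ASIC generation.
 */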
1253 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
1254 struct ocrdma_pd *pd)
1256 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1257 qp->sq_db = dev->nic_info.db +
1258 (pd->id * dev->nic_info.db_page_size) +
1259 OCRDMA_DB_GEN2_SQ_OFFSET;
1260 qp->rq_db = dev->nic_info.db +
1261 (pd->id * dev->nic_info.db_page_size) +
1262 OCRDMA_DB_GEN2_RQ_OFFSET;
1264 qp->sq_db = dev->nic_info.db +
1265 (pd->id * dev->nic_info.db_page_size) +
1266 OCRDMA_DB_SQ_OFFSET;
1267 qp->rq_db = dev->nic_info.db +
1268 (pd->id * dev->nic_info.db_page_size) +
1269 OCRDMA_DB_RQ_OFFSET;
1273 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
1276 kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
1278 if (qp->wqe_wr_id_tbl == NULL)
1281 kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
1282 if (qp->rqe_wr_id_tbl == NULL)
1288 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1289 struct ocrdma_pd *pd,
1290 struct ib_qp_init_attr *attrs)
1293 spin_lock_init(&qp->q_lock);
1294 INIT_LIST_HEAD(&qp->sq_entry);
1295 INIT_LIST_HEAD(&qp->rq_entry);
1297 qp->qp_type = attrs->qp_type;
1298 qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1299 qp->max_inline_data = attrs->cap.max_inline_data;
1300 qp->sq.max_sges = attrs->cap.max_send_sge;
1301 qp->rq.max_sges = attrs->cap.max_recv_sge;
1302 qp->state = OCRDMA_QPS_RST;
1303 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1306 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1307 struct ib_qp_init_attr *attrs)
1309 if (attrs->qp_type == IB_QPT_GSI) {
1310 dev->gsi_qp_created = 1;
1311 dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1312 dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1316 struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1317 struct ib_qp_init_attr *attrs,
1318 struct ib_udata *udata)
1321 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1322 struct ocrdma_qp *qp;
1323 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1324 struct ocrdma_create_qp_ureq ureq;
1325 u16 dpp_credit_lmt, dpp_offset;
1327 status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
1331 memset(&ureq, 0, sizeof(ureq));
1333 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1334 return ERR_PTR(-EFAULT);
1336 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1341 ocrdma_set_qp_init_params(qp, pd, attrs);
1343 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1344 OCRDMA_QP_FAST_REG);
1346 mutex_lock(&dev->dev_lock);
1347 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1349 &dpp_offset, &dpp_credit_lmt);
1353 /* user-space QPs' wr_id tables are managed by the library */
1354 if (udata == NULL) {
1355 status = ocrdma_alloc_wr_id_tbl(qp);
1360 status = ocrdma_add_qpn_map(dev, qp);
1363 ocrdma_set_qp_db(dev, qp, pd);
1365 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1367 (attrs->srq != NULL));
1371 ocrdma_store_gsi_qp_cq(dev, attrs);
1372 qp->ibqp.qp_num = qp->id;
1373 mutex_unlock(&dev->dev_lock);
1377 ocrdma_del_qpn_map(dev, qp);
1379 ocrdma_mbx_destroy_qp(dev, qp);
1381 mutex_unlock(&dev->dev_lock);
1382 kfree(qp->wqe_wr_id_tbl);
1383 kfree(qp->rqe_wr_id_tbl);
1385 pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1387 return ERR_PTR(status);
1390 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1394 struct ocrdma_qp *qp;
1395 struct ocrdma_dev *dev;
1396 enum ib_qp_state old_qps;
1398 qp = get_ocrdma_qp(ibqp);
1399 dev = get_ocrdma_dev(ibqp->device);
1400 if (attr_mask & IB_QP_STATE)
1401 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1402 /* if new and previous states are the same, hw doesn't need to take any action. */
1407 return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
1410 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1411 int attr_mask, struct ib_udata *udata)
1413 unsigned long flags;
1414 int status = -EINVAL;
1415 struct ocrdma_qp *qp;
1416 struct ocrdma_dev *dev;
1417 enum ib_qp_state old_qps, new_qps;
1419 qp = get_ocrdma_qp(ibqp);
1420 dev = get_ocrdma_dev(ibqp->device);
1422 /* synchronize with multiple contexts trying to change or retrieve the qp state */
1423 mutex_lock(&dev->dev_lock);
1424 /* synchronize with wqe, rqe posting and cqe processing contexts */
1425 spin_lock_irqsave(&qp->q_lock, flags);
1426 old_qps = get_ibqp_state(qp->state);
1427 if (attr_mask & IB_QP_STATE)
1428 new_qps = attr->qp_state;
1431 spin_unlock_irqrestore(&qp->q_lock, flags);
1433 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
1434 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1435 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1436 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1441 status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1445 mutex_unlock(&dev->dev_lock);
1449 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1467 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1469 int ib_qp_acc_flags = 0;
1471 if (qp_cap_flags & OCRDMA_QP_INB_WR)
1472 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1473 if (qp_cap_flags & OCRDMA_QP_INB_RD)
1474 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1475 return ib_qp_acc_flags;
1478 int ocrdma_query_qp(struct ib_qp *ibqp,
1479 struct ib_qp_attr *qp_attr,
1480 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1484 struct ocrdma_qp_params params;
1485 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1486 struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1488 memset(¶ms, 0, sizeof(params));
1489 mutex_lock(&dev->dev_lock);
1490 status = ocrdma_mbx_query_qp(dev, qp, ¶ms);
1491 mutex_unlock(&dev->dev_lock);
1494 if (qp->qp_type == IB_QPT_UD)
1495 qp_attr->qkey = params.qkey;
1497 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1498 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1499 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1500 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1501 qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1502 qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1503 qp_attr->dest_qp_num =
1504 params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1506 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1507 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1508 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1509 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1510 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1511 qp_attr->cap.max_inline_data = qp->max_inline_data;
1512 qp_init_attr->cap = qp_attr->cap;
1513 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1515 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
1516 params.rnt_rc_sl_fl &
1517 OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
1519 (params.hop_lmt_rq_psn &
1520 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1521 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
1522 (params.tclass_sq_psn &
1523 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1524 OCRDMA_QP_PARAMS_TCLASS_SHIFT);
1525 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, ¶ms.dgid[0]);
1527 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
1528 rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
1529 OCRDMA_QP_PARAMS_SL_MASK) >>
1530 OCRDMA_QP_PARAMS_SL_SHIFT);
1531 qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1532 OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1533 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1534 qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1535 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1536 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1537 qp_attr->retry_cnt =
1538 (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1539 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1540 qp_attr->min_rnr_timer = 0;
1541 qp_attr->pkey_index = 0;
1542 qp_attr->port_num = 1;
1543 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
1544 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
1545 qp_attr->alt_pkey_index = 0;
1546 qp_attr->alt_port_num = 0;
1547 qp_attr->alt_timeout = 0;
1548 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1549 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1550 OCRDMA_QP_PARAMS_STATE_SHIFT;
1551 qp_attr->qp_state = get_ibqp_state(qp_state);
1552 qp_attr->cur_qp_state = qp_attr->qp_state;
1553 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1554 qp_attr->max_dest_rd_atomic =
1555 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1556 qp_attr->max_rd_atomic =
1557 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1558 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1559 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1560 /* Sync driver QP state with FW */
1561 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1566 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1568 unsigned int i = idx / 32;
1569 u32 mask = (1U << (idx % 32));
1571 srq->idx_bit_fields[i] ^= mask;
1574 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1576 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1579 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1581 return (qp->sq.tail == qp->sq.head);
1584 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1586 return (qp->rq.tail == qp->rq.head);
1589 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1591 return q->va + (q->head * q->entry_size);
1594 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1597 return q->va + (idx * q->entry_size);
1600 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1602 q->head = (q->head + 1) & q->max_wqe_idx;
1605 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1607 q->tail = (q->tail + 1) & q->max_wqe_idx;
1610 /* discard the cqe for a given QP */
1611 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1613 unsigned long cq_flags;
1614 unsigned long flags;
1615 int discard_cnt = 0;
1616 u32 cur_getp, stop_getp;
1617 struct ocrdma_cqe *cqe;
1618 u32 qpn = 0, wqe_idx = 0;
1620 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1622 /* traverse through the CQEs in the hw CQ,
1623 * find the matching CQE for a given qp,
1624 * mark the matching one discarded by clearing qpn.
1625 * ring the doorbell in the poll_cq() as
1626 * we don't complete out-of-order cqes.
 */
1629 cur_getp = cq->getp;
1630 /* find up to where we reap the cq. */
1631 stop_getp = cur_getp;
1633 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1636 cqe = cq->va + cur_getp;
1637 /* if (a) done reaping whole hw cq, or
1638 * (b) qp_xq becomes empty.
 */
1641 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1642 /* if previously discarded cqe found, skip that too. */
1643 /* check for matching qp */
1644 if (qpn == 0 || qpn != qp->id)
1647 if (is_cqe_for_sq(cqe)) {
1648 ocrdma_hwq_inc_tail(&qp->sq);
1651 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1652 OCRDMA_CQE_BUFTAG_SHIFT) &
1653 qp->srq->rq.max_wqe_idx;
1654 BUG_ON(wqe_idx < 1);
1655 spin_lock_irqsave(&qp->srq->q_lock, flags);
1656 ocrdma_hwq_inc_tail(&qp->srq->rq);
1657 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1658 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1661 ocrdma_hwq_inc_tail(&qp->rq);
1664 /* mark cqe discarded so that it is not picked up later in poll_cq(). */
1670 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1671 } while (cur_getp != stop_getp);
1672 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1675 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1678 unsigned long flags;
1679 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1680 /* sync with any active CQ poll */
1682 spin_lock_irqsave(&dev->flush_q_lock, flags);
1683 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1685 list_del(&qp->sq_entry);
1687 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1689 list_del(&qp->rq_entry);
1691 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1694 int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1696 struct ocrdma_pd *pd;
1697 struct ocrdma_qp *qp;
1698 struct ocrdma_dev *dev;
1699 struct ib_qp_attr attrs;
1701 unsigned long flags;
1703 qp = get_ocrdma_qp(ibqp);
1704 dev = get_ocrdma_dev(ibqp->device);
1708 /* change the QP state to ERROR */
1709 if (qp->state != OCRDMA_QPS_RST) {
1710 attrs.qp_state = IB_QPS_ERR;
1711 attr_mask = IB_QP_STATE;
1712 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1714 /* ensure that CQEs for a newly created QP (whose id may be the same
1715 * as that of the QP just being destroyed) don't get discarded until
1716 * the old CQEs are discarded.
 */
1718 mutex_lock(&dev->dev_lock);
1719 (void) ocrdma_mbx_destroy_qp(dev, qp);
1722 /* acquire CQ lock while destroy is in progress, in order to
1723 * protect against processing in-flight CQEs for this QP.
 */
1725 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1726 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
1727 spin_lock(&qp->rq_cq->cq_lock);
1728 ocrdma_del_qpn_map(dev, qp);
1729 spin_unlock(&qp->rq_cq->cq_lock);
1731 ocrdma_del_qpn_map(dev, qp);
1733 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1736 ocrdma_discard_cqes(qp, qp->sq_cq);
1737 ocrdma_discard_cqes(qp, qp->rq_cq);
1739 mutex_unlock(&dev->dev_lock);
1742 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1743 PAGE_ALIGN(qp->sq.len));
1745 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1746 PAGE_ALIGN(qp->rq.len));
1749 ocrdma_del_flush_qp(qp);
1751 kfree(qp->wqe_wr_id_tbl);
1752 kfree(qp->rqe_wr_id_tbl);
1757 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1758 struct ib_udata *udata)
1761 struct ocrdma_create_srq_uresp uresp;
1763 memset(&uresp, 0, sizeof(uresp));
1764 uresp.rq_dbid = srq->rq.dbid;
1765 uresp.num_rq_pages = 1;
1766 uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1767 uresp.rq_page_size = srq->rq.len;
1768 uresp.db_page_addr = dev->nic_info.unmapped_db +
1769 (srq->pd->id * dev->nic_info.db_page_size);
1770 uresp.db_page_size = dev->nic_info.db_page_size;
1771 uresp.num_rqe_allocated = srq->rq.max_cnt;
1772 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1773 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1774 uresp.db_shift = 24;
1776 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1777 uresp.db_shift = 16;
1780 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1783 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1784 uresp.rq_page_size);
1790 int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1791 struct ib_udata *udata)
1794 struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
1795 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1796 struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
1798 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1800 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1803 spin_lock_init(&srq->q_lock);
1805 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1806 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1811 srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
1813 if (!srq->rqe_wr_id_tbl) {
1818 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1819 (srq->rq.max_cnt % 32 ? 1 : 0);
1820 srq->idx_bit_fields =
1821 kmalloc_array(srq->bit_fields_len, sizeof(u32),
1823 if (!srq->idx_bit_fields) {
1827 memset(srq->idx_bit_fields, 0xff,
1828 srq->bit_fields_len * sizeof(u32));
1831 if (init_attr->attr.srq_limit) {
1832 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1838 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1846 ocrdma_mbx_destroy_srq(dev, srq);
1847 kfree(srq->rqe_wr_id_tbl);
1848 kfree(srq->idx_bit_fields);
1852 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1853 struct ib_srq_attr *srq_attr,
1854 enum ib_srq_attr_mask srq_attr_mask,
1855 struct ib_udata *udata)
1858 struct ocrdma_srq *srq;
1860 srq = get_ocrdma_srq(ibsrq);
1861 if (srq_attr_mask & IB_SRQ_MAX_WR)
1864 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1868 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1871 struct ocrdma_srq *srq;
1873 srq = get_ocrdma_srq(ibsrq);
1874 status = ocrdma_mbx_query_srq(srq, srq_attr);
1878 void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1880 struct ocrdma_srq *srq;
1881 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1883 srq = get_ocrdma_srq(ibsrq);
1885 ocrdma_mbx_destroy_srq(dev, srq);
1888 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1889 PAGE_ALIGN(srq->rq.len));
1891 kfree(srq->idx_bit_fields);
1892 kfree(srq->rqe_wr_id_tbl);
1895 /* unprivileged verbs and their support functions. */
1896 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1897 struct ocrdma_hdr_wqe *hdr,
1898 const struct ib_send_wr *wr)
1900 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1901 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1902 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
1904 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
1905 if (qp->qp_type == IB_QPT_GSI)
1906 ud_hdr->qkey = qp->qkey;
1908 ud_hdr->qkey = ud_wr(wr)->remote_qkey;
1909 ud_hdr->rsvd_ahid = ah->id;
1910 ud_hdr->hdr_type = ah->hdr_type;
1911 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1912 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
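/* Copy the ib_sge list into the WQE's SGE array and accumulate the total
 * length in the WQE header; an empty list is written as one zeroed SGE.
 */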
1915 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1916 struct ocrdma_sge *sge, int num_sge,
1917 struct ib_sge *sg_list)
1921 for (i = 0; i < num_sge; i++) {
1922 sge[i].lrkey = sg_list[i].lkey;
1923 sge[i].addr_lo = sg_list[i].addr;
1924 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1925 sge[i].len = sg_list[i].length;
1926 hdr->total_len += sg_list[i].length;
1929 memset(sge, 0, sizeof(*sge));
1932 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1934 uint32_t total_len = 0, i;
1936 for (i = 0; i < num_sge; i++)
1937 total_len += sg_list[i].length;
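/* For inline sends (non-UD), copy the payload bytes straight into the WQE
 * and mark it OCRDMA_TYPE_INLINE; otherwise build regular SGEs. The WQE
 * size, in strides, is encoded into the control word.
 */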
1942 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1943 struct ocrdma_hdr_wqe *hdr,
1944 struct ocrdma_sge *sge,
1945 const struct ib_send_wr *wr, u32 wqe_size)
1950 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1951 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1952 if (unlikely(hdr->total_len > qp->max_inline_data)) {
1953 pr_err("%s() supported_len=0x%x,\n"
1954 " unsupported len req=0x%x\n", __func__,
1955 qp->max_inline_data, hdr->total_len);
1958 dpp_addr = (char *)sge;
1959 for (i = 0; i < wr->num_sge; i++) {
1961 (void *)(unsigned long)wr->sg_list[i].addr,
1962 wr->sg_list[i].length);
1963 dpp_addr += wr->sg_list[i].length;
1966 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1967 if (0 == hdr->total_len)
1968 wqe_size += sizeof(struct ocrdma_sge);
1969 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1971 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1973 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1975 wqe_size += sizeof(struct ocrdma_sge);
1976 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1978 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1982 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1983 const struct ib_send_wr *wr)
1986 struct ocrdma_sge *sge;
1987 u32 wqe_size = sizeof(*hdr);
1989 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1990 ocrdma_build_ud_hdr(qp, hdr, wr);
1991 sge = (struct ocrdma_sge *)(hdr + 2);
1992 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
1994 sge = (struct ocrdma_sge *)(hdr + 1);
1997 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2001 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2002 const struct ib_send_wr *wr)
2005 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2006 struct ocrdma_sge *sge = ext_rw + 1;
2007 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2009 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2012 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2013 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2014 ext_rw->lrkey = rdma_wr(wr)->rkey;
2015 ext_rw->len = hdr->total_len;
2019 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2020 const struct ib_send_wr *wr)
2022 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2023 struct ocrdma_sge *sge = ext_rw + 1;
2024 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2025 sizeof(struct ocrdma_hdr_wqe);
2027 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2028 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2029 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2030 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2032 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2033 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2034 ext_rw->lrkey = rdma_wr(wr)->rkey;
2035 ext_rw->len = hdr->total_len;
2038 static int get_encoded_page_size(int pg_sz)
2040 /* Max size is 256M (4096 << 16) */
2043 if (pg_sz == (4096 << i))
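/* Build a fast-register WQE from an ib_reg_wr: encode the access flags,
 * the iova and first-byte offset, and copy the MR's page array into the
 * PBL table as PBEs.
 */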
2048 static int ocrdma_build_reg(struct ocrdma_qp *qp,
2049 struct ocrdma_hdr_wqe *hdr,
2050 const struct ib_reg_wr *wr)
2053 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2054 struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
2055 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
2056 struct ocrdma_pbe *pbe;
2057 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2058 int num_pbes = 0, i;
2060 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2062 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2063 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2065 if (wr->access & IB_ACCESS_LOCAL_WRITE)
2066 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2067 if (wr->access & IB_ACCESS_REMOTE_WRITE)
2068 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2069 if (wr->access & IB_ACCESS_REMOTE_READ)
2070 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2071 hdr->lkey = wr->key;
2072 hdr->total_len = mr->ibmr.length;
2074 fbo = mr->ibmr.iova - mr->pages[0];
2076 fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
2077 fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
2078 fast_reg->fbo_hi = upper_32_bits(fbo);
2079 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2080 fast_reg->num_sges = mr->npages;
2081 fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
2084 for (i = 0; i < mr->npages; i++) {
2085 u64 buf_addr = mr->pages[i];
2087 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2088 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2092 /* if the pbl is full storing the pbes, move to the next pbl. */
2095 if (num_pbes == (mr->hwmr.pbl_size/sizeof(u64))) {
2097 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2104 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2106 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2108 iowrite32(val, qp->sq_db);
2111 int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2112 const struct ib_send_wr **bad_wr)
2115 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2116 struct ocrdma_hdr_wqe *hdr;
2117 unsigned long flags;
2119 spin_lock_irqsave(&qp->q_lock, flags);
2120 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2121 spin_unlock_irqrestore(&qp->q_lock, flags);
2127 if (qp->qp_type == IB_QPT_UD &&
2128 (wr->opcode != IB_WR_SEND &&
2129 wr->opcode != IB_WR_SEND_WITH_IMM)) {
2134 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2135 wr->num_sge > qp->sq.max_sges) {
2140 hdr = ocrdma_hwq_head(&qp->sq);
2142 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2143 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2144 if (wr->send_flags & IB_SEND_FENCE)
2146 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2147 if (wr->send_flags & IB_SEND_SOLICITED)
2149 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2151 switch (wr->opcode) {
2152 case IB_WR_SEND_WITH_IMM:
2153 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2154 hdr->immdt = ntohl(wr->ex.imm_data);
2157 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2158 ocrdma_build_send(qp, hdr, wr);
2160 case IB_WR_SEND_WITH_INV:
2161 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2162 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2163 hdr->lkey = wr->ex.invalidate_rkey;
2164 status = ocrdma_build_send(qp, hdr, wr);
2166 case IB_WR_RDMA_WRITE_WITH_IMM:
2167 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2168 hdr->immdt = ntohl(wr->ex.imm_data);
2170 case IB_WR_RDMA_WRITE:
2171 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2172 status = ocrdma_build_write(qp, hdr, wr);
2174 case IB_WR_RDMA_READ:
2175 ocrdma_build_read(qp, hdr, wr);
2177 case IB_WR_LOCAL_INV:
2179 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2180 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2181 sizeof(struct ocrdma_sge)) /
2182 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2183 hdr->lkey = wr->ex.invalidate_rkey;
2186 status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
2196 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2197 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2199 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2200 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2201 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2202 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2203 /* make sure wqe is written before adapter can access it */
2205 /* inform hw to start processing it */
2206 ocrdma_ring_sq_db(qp);
2208 /* update pointer, counter for next wr */
2209 ocrdma_hwq_inc_head(&qp->sq);
2212 spin_unlock_irqrestore(&qp->q_lock, flags);
2216 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2218 u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2220 iowrite32(val, qp->rq_db);
2223 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
2224 const struct ib_recv_wr *wr, u16 tag)
2227 struct ocrdma_sge *sge;
2229 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2231 wqe_size = sizeof(*sge) + sizeof(*rqe);
2233 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2234 OCRDMA_WQE_SIZE_SHIFT);
2235 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2236 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2238 rqe->rsvd_tag = tag;
2239 sge = (struct ocrdma_sge *)(rqe + 1);
2240 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2241 ocrdma_cpu_to_le32(rqe, wqe_size);
2244 int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
2245 const struct ib_recv_wr **bad_wr)
2248 unsigned long flags;
2249 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2250 struct ocrdma_hdr_wqe *rqe;
2252 spin_lock_irqsave(&qp->q_lock, flags);
2253 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2254 spin_unlock_irqrestore(&qp->q_lock, flags);
2259 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2260 wr->num_sge > qp->rq.max_sges) {
2265 rqe = ocrdma_hwq_head(&qp->rq);
2266 ocrdma_build_rqe(rqe, wr, 0);
2268 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2269 /* make sure rqe is written before adapter can access it */
2272 /* inform hw to start processing it */
2273 ocrdma_ring_rq_db(qp);
2275 /* update pointer, counter for next wr */
2276 ocrdma_hwq_inc_head(&qp->rq);
2279 spin_unlock_irqrestore(&qp->q_lock, flags);
2283 /* cqe for srq's rqe can potentially arrive out of order.
2284 * index gives the entry in the shadow table where to store
2285 * the wr_id. tag/index is returned in cqe to reference back
 * the stored wr_id.
 */
2288 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2293 for (row = 0; row < srq->bit_fields_len; row++) {
2294 if (srq->idx_bit_fields[row]) {
2295 indx = ffs(srq->idx_bit_fields[row]);
2296 indx = (row * 32) + (indx - 1);
2297 BUG_ON(indx >= srq->rq.max_cnt);
2298 ocrdma_srq_toggle_bit(srq, indx);
2303 BUG_ON(row == srq->bit_fields_len);
2304 return indx + 1; /* Use from index 1 */
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}

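/* Post receive work requests on an SRQ. Each RQE is tagged with an index
 * from the SRQ shadow table so the wr_id can be recovered when its CQE
 * arrives, possibly out of order.
 */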
int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);
		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}

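/* Map an adapter CQE error code onto the corresponding ib_wc_status value;
 * unknown codes are reported as IB_WC_GENERAL_ERR.
 */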
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}

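/* Fill a work completion for the send-queue WQE at wqe_idx: recover the
 * wr_id from the shadow table and derive the ib_wc opcode (and byte_len for
 * RDMA reads) from the WQE control word.
 */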
static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_FR_MR:
		ibwc->opcode = IB_WC_REG_MR;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	}
}

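/* Overwrite the status field of a CQE with WR_FLUSH_ERR so the pending
 * WQEs/RQEs of a QP in error can be reported as flushed. Receive CQEs for
 * UD/GSI QPs carry their status in a separate field and are patched there.
 */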
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
					OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}

static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);

	/* if wqe/rqe pending for which cqe needs to be returned,
	 * trigger inflating it.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}

static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

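/* Handle an error CQE for the send queue. Depending on whether the SQ/RQ
 * still hold pending entries and whether both queues share one CQ, the CQE
 * is consumed for the SQ, reused to flush the RQ, or kept in place to
 * trigger the event on the buddy CQ.
 */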
static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when hw sq is empty, but rq is not empty, so we continue
	 * to keep the cqe in order to get the cq event again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* when cq for rq and sq is same, it is safe to return
		 * flush cqe for RQEs.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* stop processing further cqe as this cqe is used for
			 * triggering cq event on buddy cq of RQ.
			 * When QP is destroyed, this cqe will be removed
			 * from the cq's hardware q.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_sq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}

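/* Successful send CQE: only signaled WQEs produce a visible completion, and
 * a CQE whose wqeidx is ahead of the current SQ tail is coalesced, so the
 * caller must expand it over the intervening WQEs before consuming it.
 */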
static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		*polled = false;    /* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
	}
	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
			OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
	if (tail != wqe_idx)
		expand = true; /* Coalesced CQE can't be consumed yet */

	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}

static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}

static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
				 struct ocrdma_cqe *cqe)
{
	int status;
	u16 hdr_type = 0;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
						OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = 0;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			  OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
			  OCRDMA_CQE_UD_XFER_LEN_MASK;

	if (ocrdma_is_udp_encap_supported(dev)) {
		hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			    OCRDMA_CQE_UD_L3TYPE_SHIFT) &
			    OCRDMA_CQE_UD_L3TYPE_MASK;
		ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
		ibwc->network_hdr_type = hdr_type;
	}

	return status;
}

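/* Complete an SRQ receive: look up the wr_id via the buffer tag carried in
 * the CQE, return the tag's bit to the SRQ index bitmap and advance the
 * SRQ tail.
 */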
static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
	BUG_ON(wqe_idx < 1);

	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}

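/* Receive-queue counterpart of ocrdma_poll_err_scqe(): decide whether the
 * error CQE completes an RQE, is reused to flush the SQ, or is kept to
 * signal the buddy CQ.
 */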
static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when hw_rq is empty, but wq is not empty, so continue
	 * to keep the cqe to get the cq event again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_rq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(qp->ibqp.device);
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(dev, ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq) {
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	} else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}

static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			OCRDMA_CQE_UD_STATUS_MASK) >>
			OCRDMA_CQE_UD_STATUS_SHIFT;
	} else {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	}
	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}

static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else {
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
	}
}

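/* Core CQ polling loop: walk valid CQEs from cq->getp, dispatch each to the
 * SQ or RQ handler of the owning QP, honour the coalesced ("expand") and
 * buddy-CQ ("stop") cases, flip the phase bit on wrap-around and finally
 * ring the CQ doorbell with the number of hardware CQEs consumed. Returns
 * the number of ib_wc entries filled.
 */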
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe;
	u16 cur_getp; bool polled = false; bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			polled = false;
			ibwc = ibwc + 1;
			i = i + 1;
			num_entries -= 1;
		}
	}
stop_cqe:
	cq->getp = cur_getp;

	if (polled_hw_cqes)
		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

	return i;
}

/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}

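/* Verbs poll_cq entry point: first drain completions from the adapter CQ,
 * then, if room remains, synthesize FLUSH_ERR completions for QPs on this
 * CQ's flush list whose WQEs/RQEs can no longer complete in hardware.
 */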
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;
	unsigned long flags;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* adapter returns single error cqe when qp moves to
		 * error state. So insert error cqes with wc_status as
		 * FLUSHED for pending WQEs and RQEs of QP's SQ and RQ
		 * respectively which uses this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}

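/* Request a completion notification: arm the CQ doorbell, optionally for
 * solicited completions only, under the CQ lock.
 */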
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	u16 cq_id;
	unsigned long flags;
	bool arm_needed = false, sol_needed = false;

	cq_id = cq->id;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		arm_needed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		sol_needed = true;

	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;
}

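/* Allocate a fast-register MR (IB_MR_TYPE_MEM_REG only): reserve a page
 * list and PBL resources sized for max_num_sg, register the MR with the
 * adapter and record it in the device stag array.
 */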
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
			      u32 max_num_sg, struct ib_udata *udata)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);
	if (max_num_sg > dev->attr.max_pages_per_frmr)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		status = -ENOMEM;
		goto pl_err;
	}

	status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
	if (status)
		goto pbl_err;
	mr->hwmr.fr_mr = 1;
	mr->hwmr.remote_rd = 0;
	mr->hwmr.remote_wr = 0;
	mr->hwmr.local_rd = 0;
	mr->hwmr.local_wr = 0;
	mr->hwmr.mw_bind = 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
	if (status)
		goto mbx_err;
	mr->ibmr.rkey = mr->hwmr.lkey;
	mr->ibmr.lkey = mr->hwmr.lkey;
	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long)mr;
	return &mr->ibmr;
mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr->pages);
pl_err:
	kfree(mr);
	return ERR_PTR(-ENOMEM);
}

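/* ib_sg_to_pages() callback: append one page address to the MR page list,
 * failing once the PBL capacity (num_pbes) is reached.
 */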
static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	if (unlikely(mr->npages == mr->hwmr.num_pbes))
		return -ENOMEM;
	mr->pages[mr->npages++] = addr;
	return 0;
}

int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}