1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
37 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
46 #include <linux/qed/common_hsi.h>
47 #include "qedr_hsi_rdma.h"
48 #include <linux/qed/qed_if.h>
51 #include <rdma/qedr-abi.h>
52 #include "qedr_roce_cm.h"
54 #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
55 #define RDMA_MAX_SGE_PER_SRQ (4)
56 #define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
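/* Note: RDMA_MAX_SRQ_WQE_SIZE is the SRQ WQE size in ring elements; the one
 * element on top of the SGE count is assumed to hold the SRQ WQE header.
 */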
58 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
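/* DB_ADDR_SHIFT() turns a DQ PWM doorbell offset (e.g.
 * DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD) into a byte offset within the doorbell BAR
 * by shifting it left by DB_PWM_ADDR_OFFSET_SHIFT.
 */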
60 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
63 size_t min_len = min_t(size_t, len, udata->outlen);
65 return ib_copy_to_udata(udata, src, min_len);
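/* RoCE exposes a flat P_Key table: any index below QEDR_ROCE_PKEY_TABLE_LEN
 * returns QEDR_ROCE_PKEY_DEFAULT, while out-of-range indices are rejected.
 */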
68 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
70 if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
73 *pkey = QEDR_ROCE_PKEY_DEFAULT;
77 int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
78 int index, union ib_gid *sgid)
80 struct qedr_dev *dev = get_qedr_dev(ibdev);
82 memset(sgid->raw, 0, sizeof(sgid->raw));
83 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
85 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
86 sgid->global.interface_id, sgid->global.subnet_prefix);
91 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
93 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
94 struct qedr_device_attr *qattr = &dev->attr;
95 struct qedr_srq *srq = get_qedr_srq(ibsrq);
97 srq_attr->srq_limit = srq->srq_limit;
98 srq_attr->max_wr = qattr->max_srq_wr;
99 srq_attr->max_sge = qattr->max_sge;
104 int qedr_query_device(struct ib_device *ibdev,
105 struct ib_device_attr *attr, struct ib_udata *udata)
107 struct qedr_dev *dev = get_qedr_dev(ibdev);
108 struct qedr_device_attr *qattr = &dev->attr;
110 if (!dev->rdma_ctx) {
112 "qedr_query_device called with invalid params rdma_ctx=%p\n",
117 memset(attr, 0, sizeof(*attr));
119 attr->fw_ver = qattr->fw_ver;
120 attr->sys_image_guid = qattr->sys_image_guid;
121 attr->max_mr_size = qattr->max_mr_size;
122 attr->page_size_cap = qattr->page_size_caps;
123 attr->vendor_id = qattr->vendor_id;
124 attr->vendor_part_id = qattr->vendor_part_id;
125 attr->hw_ver = qattr->hw_ver;
126 attr->max_qp = qattr->max_qp;
127 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
128 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
129 IB_DEVICE_RC_RNR_NAK_GEN |
130 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
132 attr->max_send_sge = qattr->max_sge;
133 attr->max_recv_sge = qattr->max_sge;
134 attr->max_sge_rd = qattr->max_sge;
135 attr->max_cq = qattr->max_cq;
136 attr->max_cqe = qattr->max_cqe;
137 attr->max_mr = qattr->max_mr;
138 attr->max_mw = qattr->max_mw;
139 attr->max_pd = qattr->max_pd;
140 attr->atomic_cap = dev->atomic_cap;
141 attr->max_fmr = qattr->max_fmr;
142 attr->max_map_per_fmr = 16;
143 attr->max_qp_init_rd_atom =
144 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
145 attr->max_qp_rd_atom =
146 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
147 attr->max_qp_init_rd_atom);
149 attr->max_srq = qattr->max_srq;
150 attr->max_srq_sge = qattr->max_srq_sge;
151 attr->max_srq_wr = qattr->max_srq_wr;
153 attr->local_ca_ack_delay = qattr->dev_ack_delay;
154 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
155 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
156 attr->max_ah = qattr->max_ah;
161 #define QEDR_SPEED_SDR (1)
162 #define QEDR_SPEED_DDR (2)
163 #define QEDR_SPEED_QDR (4)
164 #define QEDR_SPEED_FDR10 (8)
165 #define QEDR_SPEED_FDR (16)
166 #define QEDR_SPEED_EDR (32)
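/* The QEDR_SPEED_* values mirror enum ib_port_speed (SDR=1, DDR=2, QDR=4,
 * FDR10=8, FDR=16, EDR=32). get_link_speed_and_width() below maps the
 * Ethernet link speed reported by the underlying qed driver (in Mbps) to the
 * closest IB speed/width pair.
 */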
168 static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
173 *ib_speed = QEDR_SPEED_SDR;
174 *ib_width = IB_WIDTH_1X;
177 *ib_speed = QEDR_SPEED_QDR;
178 *ib_width = IB_WIDTH_1X;
182 *ib_speed = QEDR_SPEED_DDR;
183 *ib_width = IB_WIDTH_4X;
187 *ib_speed = QEDR_SPEED_EDR;
188 *ib_width = IB_WIDTH_1X;
192 *ib_speed = QEDR_SPEED_QDR;
193 *ib_width = IB_WIDTH_4X;
197 *ib_speed = QEDR_SPEED_QDR;
198 *ib_width = IB_WIDTH_4X;
202 *ib_speed = QEDR_SPEED_EDR;
203 *ib_width = IB_WIDTH_4X;
208 *ib_speed = QEDR_SPEED_SDR;
209 *ib_width = IB_WIDTH_1X;
213 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
215 struct qedr_dev *dev;
216 struct qed_rdma_port *rdma_port;
218 dev = get_qedr_dev(ibdev);
220 if (!dev->rdma_ctx) {
221 DP_ERR(dev, "rdma_ctx is NULL\n");
225 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
227 /* *attr is zeroed by the caller; avoid zeroing it again here */
228 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
229 attr->state = IB_PORT_ACTIVE;
230 attr->phys_state = 5; /* IB port physical state: LinkUp */
231 } else {
232 attr->state = IB_PORT_DOWN;
233 attr->phys_state = 3; /* IB port physical state: Disabled */
234 }
235 attr->max_mtu = IB_MTU_4096;
236 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
241 attr->ip_gids = true;
242 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
243 attr->gid_tbl_len = 1;
244 attr->pkey_tbl_len = 1;
246 attr->gid_tbl_len = QEDR_MAX_SGID;
247 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
249 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
250 attr->qkey_viol_cntr = 0;
251 get_link_speed_and_width(rdma_port->link_speed,
252 &attr->active_speed, &attr->active_width);
253 attr->max_msg_sz = rdma_port->max_msg_size;
254 attr->max_vl_num = 4;
259 int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
260 struct ib_port_modify *props)
265 static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
270 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
274 mm->key.phy_addr = phy_addr;
275 /* This function might be called with a length which is not a multiple
276 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
277 * forces this granularity by increasing the requested size if needed.
278 * When qedr_mmap is called, it will search the list with the updated
279 * length as a key. To prevent search failures, the length is rounded up
280 * in advance to PAGE_SIZE.
281 */
282 mm->key.len = roundup(len, PAGE_SIZE);
283 INIT_LIST_HEAD(&mm->entry);
285 mutex_lock(&uctx->mm_list_lock);
286 list_add(&mm->entry, &uctx->mm_head);
287 mutex_unlock(&uctx->mm_list_lock);
289 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
290 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
291 (unsigned long long)mm->key.phy_addr,
292 (unsigned long)mm->key.len, uctx);
297 static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
303 mutex_lock(&uctx->mm_list_lock);
304 list_for_each_entry(mm, &uctx->mm_head, entry) {
305 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
311 mutex_unlock(&uctx->mm_list_lock);
312 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
313 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
314 mm->key.phy_addr, mm->key.len, uctx, found);
319 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
321 struct ib_device *ibdev = uctx->device;
323 struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
324 struct qedr_alloc_ucontext_resp uresp = {};
325 struct qedr_dev *dev = get_qedr_dev(ibdev);
326 struct qed_rdma_add_user_out_params oparams;
331 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
334 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
339 ctx->dpi = oparams.dpi;
340 ctx->dpi_addr = oparams.dpi_addr;
341 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
342 ctx->dpi_size = oparams.dpi_size;
343 INIT_LIST_HEAD(&ctx->mm_head);
344 mutex_init(&ctx->mm_list_lock);
346 uresp.dpm_enabled = dev->user_dpm_enabled;
347 uresp.wids_enabled = 1;
348 uresp.wid_count = oparams.wid_count;
349 uresp.db_pa = ctx->dpi_phys_addr;
350 uresp.db_size = ctx->dpi_size;
351 uresp.max_send_wr = dev->attr.max_sqe;
352 uresp.max_recv_wr = dev->attr.max_rqe;
353 uresp.max_srq_wr = dev->attr.max_srq_wr;
354 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
355 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
356 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
357 uresp.max_cqes = QEDR_MAX_CQES;
359 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
365 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
369 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
374 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
376 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
377 struct qedr_mm *mm, *tmp;
379 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
381 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
383 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
384 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
385 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
386 mm->key.phy_addr, mm->key.len, uctx);
387 list_del(&mm->entry);
392 int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
394 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
395 struct qedr_dev *dev = get_qedr_dev(context->device);
396 unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
397 unsigned long len = (vma->vm_end - vma->vm_start);
398 unsigned long dpi_start;
400 dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
402 DP_DEBUG(dev, QEDR_MSG_INIT,
403 "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
404 (void *)vma->vm_start, (void *)vma->vm_end,
405 (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
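/* The only mapping this driver allows is the per-context DPI (doorbell)
 * region: the request must be page aligned, must have been registered via
 * qedr_add_mmap(), and must fall entirely inside
 * [dpi_start, dpi_start + dpi_size).
 */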
407 if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
409 "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
410 (void *)vma->vm_start, (void *)vma->vm_end);
414 if (!qedr_search_mmap(ucontext, phys_addr, len)) {
415 DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
420 if (phys_addr < dpi_start ||
421 ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
423 "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
424 (void *)phys_addr, (void *)dpi_start,
429 if (vma->vm_flags & VM_READ) {
430 DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
434 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
435 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
439 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
440 struct ib_udata *udata)
442 struct ib_device *ibdev = ibpd->device;
443 struct qedr_dev *dev = get_qedr_dev(ibdev);
444 struct qedr_pd *pd = get_qedr_pd(ibpd);
448 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
449 (udata && context) ? "User Lib" : "Kernel");
451 if (!dev->rdma_ctx) {
452 DP_ERR(dev, "invalid RDMA context\n");
456 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
462 if (udata && context) {
463 struct qedr_alloc_pd_uresp uresp = {
467 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
469 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
470 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
474 pd->uctx = get_qedr_ucontext(context);
481 void qedr_dealloc_pd(struct ib_pd *ibpd)
483 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
484 struct qedr_pd *pd = get_qedr_pd(ibpd);
486 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
487 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
490 static void qedr_free_pbl(struct qedr_dev *dev,
491 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
493 struct pci_dev *pdev = dev->pdev;
496 for (i = 0; i < pbl_info->num_pbls; i++) {
499 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
500 pbl[i].va, pbl[i].pa);
506 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
507 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
509 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
510 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
511 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
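/* Worked example: with MAX_FW_PBL_PAGE_SIZE = 64 KiB and 8-byte PBEs, a
 * single PBL page holds 65536 / 8 = 8192 PBEs, so a two-layer PBL can
 * describe up to 8192 * 8192 = 67,108,864 pages.
 */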
513 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
514 struct qedr_pbl_info *pbl_info,
517 struct pci_dev *pdev = dev->pdev;
518 struct qedr_pbl *pbl_table;
519 dma_addr_t *pbl_main_tbl;
524 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
526 return ERR_PTR(-ENOMEM);
528 for (i = 0; i < pbl_info->num_pbls; i++) {
529 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
534 pbl_table[i].va = va;
535 pbl_table[i].pa = pa;
538 /* Two-layer PBLs: if we have more than one PBL, we need to initialize
539 * the first one with physical pointers to all of the rest.
540 */
541 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
542 for (i = 0; i < pbl_info->num_pbls - 1; i++)
543 pbl_main_tbl[i] = pbl_table[i + 1].pa;
548 for (i--; i >= 0; i--)
549 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
550 pbl_table[i].va, pbl_table[i].pa);
552 qedr_free_pbl(dev, pbl_info, pbl_table);
554 return ERR_PTR(-ENOMEM);
557 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
558 struct qedr_pbl_info *pbl_info,
559 u32 num_pbes, int two_layer_capable)
565 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
566 if (num_pbes > MAX_PBES_TWO_LAYER) {
567 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
572 /* calculate required pbl page size */
573 pbl_size = MIN_FW_PBL_PAGE_SIZE;
574 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
575 NUM_PBES_ON_PAGE(pbl_size);
577 while (pbl_capacity < num_pbes) {
579 pbl_capacity = pbl_size / sizeof(u64);
580 pbl_capacity = pbl_capacity * pbl_capacity;
583 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
584 num_pbls++; /* One for layer 0 (points to the PBLs) */
585 pbl_info->two_layered = true;
587 /* One layered PBL */
589 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
590 roundup_pow_of_two((num_pbes * sizeof(u64))));
591 pbl_info->two_layered = false;
594 pbl_info->num_pbls = num_pbls;
595 pbl_info->pbl_size = pbl_size;
596 pbl_info->num_pbes = num_pbes;
598 DP_DEBUG(dev, QEDR_MSG_MR,
599 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
600 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
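/* qedr_populate_pbls() walks the umem DMA pages and writes one little-endian
 * PBE per firmware page. A CPU page (PAGE_SIZE) may be larger than a firmware
 * page (1 << pg_shift), so each umem page can produce several PBEs.
 */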
605 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
606 struct qedr_pbl *pbl,
607 struct qedr_pbl_info *pbl_info, u32 pg_shift)
609 int pbe_cnt, total_num_pbes = 0;
610 u32 fw_pg_cnt, fw_pg_per_umem_pg;
611 struct qedr_pbl *pbl_tbl;
612 struct sg_dma_page_iter sg_iter;
616 if (!pbl_info->num_pbes)
619 /* If we have a two-layer PBL, the first PBL points to the rest of
620 * the PBLs and the first data entry lies in the second PBL of the table.
621 */
622 if (pbl_info->two_layered)
627 pbe = (struct regpair *)pbl_tbl->va;
629 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
635 fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift);
637 for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
638 pg_addr = sg_page_iter_dma_address(&sg_iter);
639 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
640 pbe->lo = cpu_to_le32(pg_addr);
641 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
643 pg_addr += BIT(pg_shift);
648 if (total_num_pbes == pbl_info->num_pbes)
651 /* If the given pbl is full storing the pbes,
652 * move to the next pbl.
653 */
654 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
656 pbe = (struct regpair *)pbl_tbl->va;
665 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
666 struct qedr_cq *cq, struct ib_udata *udata)
668 struct qedr_create_cq_uresp uresp;
671 memset(&uresp, 0, sizeof(uresp));
673 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
674 uresp.icid = cq->icid;
676 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
678 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
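/* consume_cqe(): the CQE ring carries an ownership (toggle) bit. Whenever
 * consumption passes the last element of the chain, the expected toggle value
 * is flipped so that stale entries from the previous lap are not mistaken for
 * new completions.
 */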
683 static void consume_cqe(struct qedr_cq *cq)
685 if (cq->latest_cqe == cq->toggle_cqe)
686 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
688 cq->latest_cqe = qed_chain_consume(&cq->pbl);
691 static inline int qedr_align_cq_entries(int entries)
693 u64 size, aligned_size;
695 /* We allocate an extra entry that we don't report to the FW. */
696 size = (entries + 1) * QEDR_CQE_SIZE;
697 aligned_size = ALIGN(size, PAGE_SIZE);
699 return aligned_size / QEDR_CQE_SIZE;
702 static inline int qedr_init_user_queue(struct ib_udata *udata,
703 struct qedr_dev *dev,
704 struct qedr_userq *q, u64 buf_addr,
705 size_t buf_len, int access, int dmasync,
711 q->buf_addr = buf_addr;
712 q->buf_len = buf_len;
713 q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access, dmasync);
714 if (IS_ERR(q->umem)) {
715 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
717 return PTR_ERR(q->umem);
720 fw_pages = ib_umem_page_count(q->umem) <<
721 (PAGE_SHIFT - FW_PAGE_SHIFT);
723 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
727 if (alloc_and_init) {
728 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
729 if (IS_ERR(q->pbl_tbl)) {
730 rc = PTR_ERR(q->pbl_tbl);
733 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
736 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
746 ib_umem_release(q->umem);
752 static inline void qedr_init_cq_params(struct qedr_cq *cq,
753 struct qedr_ucontext *ctx,
754 struct qedr_dev *dev, int vector,
755 int chain_entries, int page_cnt,
757 struct qed_rdma_create_cq_in_params
760 memset(params, 0, sizeof(*params));
761 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
762 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
763 params->cnq_id = vector;
764 params->cq_size = chain_entries - 1;
765 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
766 params->pbl_num_pages = page_cnt;
767 params->pbl_ptr = pbl_ptr;
768 params->pbl_two_level = 0;
771 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
773 cq->db.data.agg_flags = flags;
774 cq->db.data.value = cpu_to_le32(cons);
775 writeq(cq->db.raw, cq->db_addr);
778 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
780 struct qedr_cq *cq = get_qedr_cq(ibcq);
781 unsigned long sflags;
782 struct qedr_dev *dev;
784 dev = get_qedr_dev(ibcq->device);
788 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
794 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
797 spin_lock_irqsave(&cq->cq_lock, sflags);
801 if (flags & IB_CQ_SOLICITED)
802 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
804 if (flags & IB_CQ_NEXT_COMP)
805 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
807 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
809 spin_unlock_irqrestore(&cq->cq_lock, sflags);
814 struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
815 const struct ib_cq_init_attr *attr,
816 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
818 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
819 struct qed_rdma_destroy_cq_out_params destroy_oparams;
820 struct qed_rdma_destroy_cq_in_params destroy_iparams;
821 struct qedr_dev *dev = get_qedr_dev(ibdev);
822 struct qed_rdma_create_cq_in_params params;
823 struct qedr_create_cq_ureq ureq;
824 int vector = attr->comp_vector;
825 int entries = attr->cqe;
833 DP_DEBUG(dev, QEDR_MSG_INIT,
834 "create_cq: called from %s. entries=%d, vector=%d\n",
835 udata ? "User Lib" : "Kernel", entries, vector);
837 if (entries > QEDR_MAX_CQES) {
839 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
840 entries, QEDR_MAX_CQES);
841 return ERR_PTR(-EINVAL);
844 chain_entries = qedr_align_cq_entries(entries);
845 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
847 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
849 return ERR_PTR(-ENOMEM);
852 memset(&ureq, 0, sizeof(ureq));
853 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
855 "create cq: problem copying data from user space\n");
861 "create cq: cannot create a cq with 0 entries\n");
865 cq->cq_type = QEDR_CQ_TYPE_USER;
867 rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
868 ureq.len, IB_ACCESS_LOCAL_WRITE, 1,
873 pbl_ptr = cq->q.pbl_tbl->pa;
874 page_cnt = cq->q.pbl_info.num_pbes;
876 cq->ibcq.cqe = chain_entries;
878 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
880 rc = dev->ops->common->chain_alloc(dev->cdev,
881 QED_CHAIN_USE_TO_CONSUME,
883 QED_CHAIN_CNT_TYPE_U32,
885 sizeof(union rdma_cqe),
890 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
891 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
892 cq->ibcq.cqe = cq->pbl.capacity;
895 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
898 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
903 cq->sig = QEDR_CQ_MAGIC_NUMBER;
904 spin_lock_init(&cq->cq_lock);
907 rc = qedr_copy_cq_uresp(dev, cq, udata);
911 /* Generate doorbell address. */
912 cq->db_addr = dev->db_addr +
913 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
914 cq->db.data.icid = cq->icid;
915 cq->db.data.params = DB_AGG_CMD_SET <<
916 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
918 /* Point to the very last element; once we pass it, we toggle. */
919 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
920 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
921 cq->latest_cqe = NULL;
923 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
926 DP_DEBUG(dev, QEDR_MSG_CQ,
927 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
928 cq->icid, cq, params.cq_size);
933 destroy_iparams.icid = cq->icid;
934 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
938 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
940 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
943 ib_umem_release(cq->q.umem);
946 return ERR_PTR(-EINVAL);
949 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
951 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
952 struct qedr_cq *cq = get_qedr_cq(ibcq);
954 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
959 #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
960 #define QEDR_DESTROY_CQ_ITER_DURATION (10)
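/* Polling budget for destroy: the first loop below busy-waits with udelay()
 * for up to 10 * 10 = 100 usec, and the second loop sleeps with msleep() for
 * up to 10 * 10 = 100 msec while waiting for outstanding CNQ notifications.
 */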
962 int qedr_destroy_cq(struct ib_cq *ibcq)
964 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
965 struct qed_rdma_destroy_cq_out_params oparams;
966 struct qed_rdma_destroy_cq_in_params iparams;
967 struct qedr_cq *cq = get_qedr_cq(ibcq);
971 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
975 /* GSI CQs are handled by the driver, so they don't exist in the FW */
976 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
979 iparams.icid = cq->icid;
980 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
984 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
986 if (ibcq->uobject && ibcq->uobject->context) {
987 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
988 ib_umem_release(cq->q.umem);
991 /* We don't want the IRQ handler to handle a non-existing CQ so we
992 * wait until all CNQ interrupts, if any, are received. This will always
993 * happen and will always happen very fast. If not, then a serious error
994 * has occurred. That is why we can use a long delay.
995 * We spin for a short time so we don't lose time on context switching
996 * in case all the completions are handled in that span. Otherwise
997 * we sleep for a while and check again. Since the CNQ may be
998 * associated with (only) the current CPU we use msleep to allow the
999 * current CPU to be freed.
1000 * The CNQ notification is incremented in qedr_irq_handler().
1001 */
1002 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1003 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1004 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1008 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1009 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1010 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1014 if (oparams.num_cq_notif != cq->cnq_notif)
1017 /* Note that we don't need to have explicit code to wait for the
1018 * completion of the event handler because it is invoked from the EQ.
1019 * Since the destroy CQ ramrod has also been received on the EQ we can
1020 * be certain that there's no event handler in process.
1021 */
1031 "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
1032 cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
1037 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1038 struct ib_qp_attr *attr,
1040 struct qed_rdma_modify_qp_in_params
1043 const struct ib_gid_attr *gid_attr;
1044 enum rdma_network_type nw_type;
1045 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1049 gid_attr = grh->sgid_attr;
1050 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr->ndev);
1052 nw_type = rdma_gid_attr_network_type(gid_attr);
1054 case RDMA_NETWORK_IPV6:
1055 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1056 sizeof(qp_params->sgid));
1057 memcpy(&qp_params->dgid.bytes[0],
1059 sizeof(qp_params->dgid));
1060 qp_params->roce_mode = ROCE_V2_IPV6;
1061 SET_FIELD(qp_params->modify_flags,
1062 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1064 case RDMA_NETWORK_IB:
1065 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1066 sizeof(qp_params->sgid));
1067 memcpy(&qp_params->dgid.bytes[0],
1069 sizeof(qp_params->dgid));
1070 qp_params->roce_mode = ROCE_V1;
1072 case RDMA_NETWORK_IPV4:
1073 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1074 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1075 ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1076 qp_params->sgid.ipv4_addr = ipv4_addr;
1078 qedr_get_ipv4_from_gid(grh->dgid.raw);
1079 qp_params->dgid.ipv4_addr = ipv4_addr;
1080 SET_FIELD(qp_params->modify_flags,
1081 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1082 qp_params->roce_mode = ROCE_V2_IPV4;
1086 for (i = 0; i < 4; i++) {
1087 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1088 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1091 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1092 qp_params->vlan_id = 0;
1097 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1098 struct ib_qp_init_attr *attrs,
1099 struct ib_udata *udata)
1101 struct qedr_device_attr *qattr = &dev->attr;
1103 /* QP0... attrs->qp_type == IB_QPT_GSI */
1104 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1105 DP_DEBUG(dev, QEDR_MSG_QP,
1106 "create qp: unsupported qp type=0x%x requested\n",
1111 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1113 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1114 attrs->cap.max_send_wr, qattr->max_sqe);
1118 if (attrs->cap.max_inline_data > qattr->max_inline) {
1120 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1121 attrs->cap.max_inline_data, qattr->max_inline);
1125 if (attrs->cap.max_send_sge > qattr->max_sge) {
1127 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1128 attrs->cap.max_send_sge, qattr->max_sge);
1132 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1134 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1135 attrs->cap.max_recv_sge, qattr->max_sge);
1139 /* Unprivileged user space cannot create special QP */
1140 if (udata && attrs->qp_type == IB_QPT_GSI) {
1142 "create qp: userspace can't create special QPs of type=0x%x\n",
1150 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1151 struct qedr_srq *srq, struct ib_udata *udata)
1153 struct qedr_create_srq_uresp uresp = {};
1156 uresp.srq_id = srq->srq_id;
1158 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1160 DP_ERR(dev, "create srq: problem copying data to user space\n");
1165 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1166 struct qedr_create_qp_uresp *uresp,
1169 /* iWARP requires two doorbells per RQ. */
1170 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1171 uresp->rq_db_offset =
1172 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1173 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1175 uresp->rq_db_offset =
1176 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1179 uresp->rq_icid = qp->icid;
1182 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1183 struct qedr_create_qp_uresp *uresp,
1186 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1188 /* iWARP uses the same cid for rq and sq */
1189 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1190 uresp->sq_icid = qp->icid;
1192 uresp->sq_icid = qp->icid + 1;
1195 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1196 struct qedr_qp *qp, struct ib_udata *udata)
1198 struct qedr_create_qp_uresp uresp;
1201 memset(&uresp, 0, sizeof(uresp));
1202 qedr_copy_sq_uresp(dev, &uresp, qp);
1203 qedr_copy_rq_uresp(dev, &uresp, qp);
1205 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1206 uresp.qp_id = qp->qp_id;
1208 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1211 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1217 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1220 struct ib_qp_init_attr *attrs)
1222 spin_lock_init(&qp->q_lock);
1223 atomic_set(&qp->refcnt, 1);
1225 qp->qp_type = attrs->qp_type;
1226 qp->max_inline_data = attrs->cap.max_inline_data;
1227 qp->sq.max_sges = attrs->cap.max_send_sge;
1228 qp->state = QED_ROCE_QP_STATE_RESET;
1229 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1230 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1234 qp->srq = get_qedr_srq(attrs->srq);
1236 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1237 qp->rq.max_sges = attrs->cap.max_recv_sge;
1238 DP_DEBUG(dev, QEDR_MSG_QP,
1239 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1240 qp->rq.max_sges, qp->rq_cq->icid);
1243 DP_DEBUG(dev, QEDR_MSG_QP,
1244 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1245 pd->pd_id, qp->qp_type, qp->max_inline_data,
1246 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1247 DP_DEBUG(dev, QEDR_MSG_QP,
1248 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1249 qp->sq.max_sges, qp->sq_cq->icid);
1252 static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1254 qp->sq.db = dev->db_addr +
1255 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1256 qp->sq.db_data.data.icid = qp->icid + 1;
1258 qp->rq.db = dev->db_addr +
1259 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1260 qp->rq.db_data.data.icid = qp->icid;
1264 static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
1265 struct ib_srq_init_attr *attrs,
1266 struct ib_udata *udata)
1268 struct qedr_device_attr *qattr = &dev->attr;
1270 if (attrs->attr.max_wr > qattr->max_srq_wr) {
1272 "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1273 attrs->attr.max_wr, qattr->max_srq_wr);
1277 if (attrs->attr.max_sge > qattr->max_sge) {
1279 "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1280 attrs->attr.max_sge, qattr->max_sge);
1287 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1289 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1290 ib_umem_release(srq->usrq.umem);
1291 ib_umem_release(srq->prod_umem);
1294 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1296 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1297 struct qedr_dev *dev = srq->dev;
1299 dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1301 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1302 hw_srq->virt_prod_pair_addr,
1303 hw_srq->phy_prod_pair_addr);
1306 static int qedr_init_srq_user_params(struct ib_udata *udata,
1307 struct qedr_srq *srq,
1308 struct qedr_create_srq_ureq *ureq,
1309 int access, int dmasync)
1311 struct scatterlist *sg;
1314 rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1315 ureq->srq_len, access, dmasync, 1);
1320 ib_umem_get(udata, ureq->prod_pair_addr,
1321 sizeof(struct rdma_srq_producers), access, dmasync);
1322 if (IS_ERR(srq->prod_umem)) {
1323 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1324 ib_umem_release(srq->usrq.umem);
1326 "create srq: failed ib_umem_get for producer, got %ld\n",
1327 PTR_ERR(srq->prod_umem));
1328 return PTR_ERR(srq->prod_umem);
1331 sg = srq->prod_umem->sg_head.sgl;
1332 srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1337 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1338 struct qedr_dev *dev,
1339 struct ib_srq_init_attr *init_attr)
1341 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1342 dma_addr_t phy_prod_pair_addr;
1347 va = dma_alloc_coherent(&dev->pdev->dev,
1348 sizeof(struct rdma_srq_producers),
1349 &phy_prod_pair_addr, GFP_KERNEL);
1352 "create srq: failed to allocate dma memory for producer\n");
1356 hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1357 hw_srq->virt_prod_pair_addr = va;
1359 num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1360 rc = dev->ops->common->chain_alloc(dev->cdev,
1361 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1363 QED_CHAIN_CNT_TYPE_U32,
1365 QEDR_SRQ_WQE_ELEM_SIZE,
1366 &hw_srq->pbl, NULL);
1370 hw_srq->num_elems = num_elems;
1375 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1376 va, phy_prod_pair_addr);
1380 static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
1382 static void qedr_idr_remove(struct qedr_dev *dev,
1383 struct qedr_idr *qidr, u32 id);
1385 struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
1386 struct ib_srq_init_attr *init_attr,
1387 struct ib_udata *udata)
1389 struct qed_rdma_destroy_srq_in_params destroy_in_params;
1390 struct qed_rdma_create_srq_in_params in_params = {};
1391 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1392 struct qed_rdma_create_srq_out_params out_params;
1393 struct qedr_pd *pd = get_qedr_pd(ibpd);
1394 struct qedr_create_srq_ureq ureq = {};
1395 u64 pbl_base_addr, phy_prod_pair_addr;
1396 struct qedr_srq_hwq_info *hw_srq;
1397 u32 page_cnt, page_size;
1398 struct qedr_srq *srq;
1401 DP_DEBUG(dev, QEDR_MSG_QP,
1402 "create SRQ called from %s (pd %p)\n",
1403 (udata) ? "User lib" : "kernel", pd);
1405 rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
1407 return ERR_PTR(-EINVAL);
1409 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1411 return ERR_PTR(-ENOMEM);
1414 hw_srq = &srq->hw_srq;
1415 spin_lock_init(&srq->lock);
1417 hw_srq->max_wr = init_attr->attr.max_wr;
1418 hw_srq->max_sges = init_attr->attr.max_sge;
1421 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1423 "create srq: problem copying data from user space\n");
1427 rc = qedr_init_srq_user_params(udata, srq, &ureq, 0, 0);
1431 page_cnt = srq->usrq.pbl_info.num_pbes;
1432 pbl_base_addr = srq->usrq.pbl_tbl->pa;
1433 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1434 page_size = PAGE_SIZE;
1436 struct qed_chain *pbl;
1438 rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1443 page_cnt = qed_chain_get_page_cnt(pbl);
1444 pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1445 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1446 page_size = QED_CHAIN_PAGE_SIZE;
1449 in_params.pd_id = pd->pd_id;
1450 in_params.pbl_base_addr = pbl_base_addr;
1451 in_params.prod_pair_addr = phy_prod_pair_addr;
1452 in_params.num_pages = page_cnt;
1453 in_params.page_size = page_size;
1455 rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1459 srq->srq_id = out_params.srq_id;
1462 rc = qedr_copy_srq_uresp(dev, srq, udata);
1467 rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
1471 DP_DEBUG(dev, QEDR_MSG_SRQ,
1472 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1476 destroy_in_params.srq_id = srq->srq_id;
1478 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1481 qedr_free_srq_user_params(srq);
1483 qedr_free_srq_kernel_params(srq);
1487 return ERR_PTR(-EFAULT);
1490 int qedr_destroy_srq(struct ib_srq *ibsrq)
1492 struct qed_rdma_destroy_srq_in_params in_params = {};
1493 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1494 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1496 qedr_idr_remove(dev, &dev->srqidr, srq->srq_id);
1497 in_params.srq_id = srq->srq_id;
1498 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1501 qedr_free_srq_user_params(srq);
1503 qedr_free_srq_kernel_params(srq);
1505 DP_DEBUG(dev, QEDR_MSG_SRQ,
1506 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1513 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1514 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1516 struct qed_rdma_modify_srq_in_params in_params = {};
1517 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1518 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1521 if (attr_mask & IB_SRQ_MAX_WR) {
1523 "modify srq: invalid attribute mask=0x%x specified for %p\n",
1528 if (attr_mask & IB_SRQ_LIMIT) {
1529 if (attr->srq_limit >= srq->hw_srq.max_wr) {
1531 "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1532 attr->srq_limit, srq->hw_srq.max_wr);
1536 in_params.srq_id = srq->srq_id;
1537 in_params.wqe_limit = attr->srq_limit;
1538 rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1543 srq->srq_limit = attr->srq_limit;
1545 DP_DEBUG(dev, QEDR_MSG_SRQ,
1546 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1552 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1555 struct ib_qp_init_attr *attrs,
1556 bool fmr_and_reserved_lkey,
1557 struct qed_rdma_create_qp_in_params *params)
1559 /* QP handle to be written in an async event */
1560 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1561 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1563 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1564 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1565 params->pd = pd->pd_id;
1566 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1567 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1568 params->stats_queue = 0;
1570 params->use_srq = false;
1573 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1576 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1577 params->srq_id = qp->srq->srq_id;
1578 params->use_srq = true;
1582 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1584 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1593 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1596 static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
1601 idr_preload(GFP_KERNEL);
1602 spin_lock_irq(&qidr->idr_lock);
1604 rc = idr_alloc(&qidr->idr, ptr, id, id + 1, GFP_ATOMIC);
1606 spin_unlock_irq(&qidr->idr_lock);
1609 return rc < 0 ? rc : 0;
1612 static void qedr_idr_remove(struct qedr_dev *dev, struct qedr_idr *qidr, u32 id)
1614 spin_lock_irq(&qidr->idr_lock);
1615 idr_remove(&qidr->idr, id);
1616 spin_unlock_irq(&qidr->idr_lock);
1620 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1622 struct qed_rdma_create_qp_out_params *out_params)
1624 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1625 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1627 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1628 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1630 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1631 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1634 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1635 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1638 static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1641 ib_umem_release(qp->usq.umem);
1642 qp->usq.umem = NULL;
1645 ib_umem_release(qp->urq.umem);
1646 qp->urq.umem = NULL;
1649 static int qedr_create_user_qp(struct qedr_dev *dev,
1652 struct ib_udata *udata,
1653 struct ib_qp_init_attr *attrs)
1655 struct qed_rdma_create_qp_in_params in_params;
1656 struct qed_rdma_create_qp_out_params out_params;
1657 struct qedr_pd *pd = get_qedr_pd(ibpd);
1658 struct qedr_create_qp_ureq ureq;
1659 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1662 memset(&ureq, 0, sizeof(ureq));
1663 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1665 DP_ERR(dev, "Problem copying data from user space\n");
1669 /* SQ - read access only (0), dma sync not required (0) */
1670 rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1671 ureq.sq_len, 0, 0, alloc_and_init);
1676 /* RQ - read access only (0), dma sync not required (0) */
1677 rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1678 ureq.rq_len, 0, 0, alloc_and_init);
1683 memset(&in_params, 0, sizeof(in_params));
1684 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1685 in_params.qp_handle_lo = ureq.qp_handle_lo;
1686 in_params.qp_handle_hi = ureq.qp_handle_hi;
1687 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1688 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1690 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1691 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1694 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1695 &in_params, &out_params);
1702 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1703 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1705 qp->qp_id = out_params.qp_id;
1706 qp->icid = out_params.icid;
1708 rc = qedr_copy_qp_uresp(dev, qp, udata);
1712 qedr_qp_user_print(dev, qp);
1716 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1718 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1721 qedr_cleanup_user(dev, qp);
1725 static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1727 qp->sq.db = dev->db_addr +
1728 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1729 qp->sq.db_data.data.icid = qp->icid;
1731 qp->rq.db = dev->db_addr +
1732 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1733 qp->rq.db_data.data.icid = qp->icid;
1734 qp->rq.iwarp_db2 = dev->db_addr +
1735 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1736 qp->rq.iwarp_db2_data.data.icid = qp->icid;
1737 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1741 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1743 struct qed_rdma_create_qp_in_params *in_params,
1744 u32 n_sq_elems, u32 n_rq_elems)
1746 struct qed_rdma_create_qp_out_params out_params;
1749 rc = dev->ops->common->chain_alloc(dev->cdev,
1750 QED_CHAIN_USE_TO_PRODUCE,
1752 QED_CHAIN_CNT_TYPE_U32,
1754 QEDR_SQE_ELEMENT_SIZE,
1760 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1761 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1763 rc = dev->ops->common->chain_alloc(dev->cdev,
1764 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1766 QED_CHAIN_CNT_TYPE_U32,
1768 QEDR_RQE_ELEMENT_SIZE,
1773 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1774 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1776 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1777 in_params, &out_params);
1782 qp->qp_id = out_params.qp_id;
1783 qp->icid = out_params.icid;
1785 qedr_set_roce_db_info(dev, qp);
1790 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1792 struct qed_rdma_create_qp_in_params *in_params,
1793 u32 n_sq_elems, u32 n_rq_elems)
1795 struct qed_rdma_create_qp_out_params out_params;
1796 struct qed_chain_ext_pbl ext_pbl;
1799 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1800 QEDR_SQE_ELEMENT_SIZE,
1801 QED_CHAIN_MODE_PBL);
1802 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1803 QEDR_RQE_ELEMENT_SIZE,
1804 QED_CHAIN_MODE_PBL);
1806 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1807 in_params, &out_params);
1812 /* Now we allocate the chain */
1813 ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1814 ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1816 rc = dev->ops->common->chain_alloc(dev->cdev,
1817 QED_CHAIN_USE_TO_PRODUCE,
1819 QED_CHAIN_CNT_TYPE_U32,
1821 QEDR_SQE_ELEMENT_SIZE,
1822 &qp->sq.pbl, &ext_pbl);
1827 ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1828 ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1830 rc = dev->ops->common->chain_alloc(dev->cdev,
1831 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1833 QED_CHAIN_CNT_TYPE_U32,
1835 QEDR_RQE_ELEMENT_SIZE,
1836 &qp->rq.pbl, &ext_pbl);
1841 qp->qp_id = out_params.qp_id;
1842 qp->icid = out_params.icid;
1844 qedr_set_iwarp_db_info(dev, qp);
1848 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1853 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
1855 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1856 kfree(qp->wqe_wr_id);
1858 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1859 kfree(qp->rqe_wr_id);
1862 static int qedr_create_kernel_qp(struct qedr_dev *dev,
1865 struct ib_qp_init_attr *attrs)
1867 struct qed_rdma_create_qp_in_params in_params;
1868 struct qedr_pd *pd = get_qedr_pd(ibpd);
1874 memset(&in_params, 0, sizeof(in_params));
1876 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1877 * the ring. The ring should allow at least a single WR, even if the
1878 * user requested none, due to allocation issues.
1879 * We should add an extra WR since the prod and cons indices of
1880 * wqe_wr_id are managed in such a way that the WQ is considered full
1881 * when (prod+1)%max_wr==cons. We currently don't do that because we
1882 * double the number of entries due to an iSER issue that pushes far more
1883 * WRs than indicated. If we decline its ib_post_send() then we get
1884 * error prints in the dmesg we'd like to avoid.
1885 */
1886 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1889 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1891 if (!qp->wqe_wr_id) {
1892 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1896 /* QP handle to be written in CQE */
1897 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1898 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
1900 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1901 * the ring. The ring should allow at least a single WR, even if the
1902 * user requested none, due to allocation issues.
1903 */
1904 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1906 /* Allocate driver internal RQ array */
1907 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1909 if (!qp->rqe_wr_id) {
1911 "create qp: failed RQ shadow memory allocation\n");
1912 kfree(qp->wqe_wr_id);
1916 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
1918 n_sq_entries = attrs->cap.max_send_wr;
1919 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1920 n_sq_entries = max_t(u32, n_sq_entries, 1);
1921 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1923 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1925 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1926 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
1927 n_sq_elems, n_rq_elems);
1929 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1930 n_sq_elems, n_rq_elems);
1932 qedr_cleanup_kernel(dev, qp);
1937 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1938 struct ib_qp_init_attr *attrs,
1939 struct ib_udata *udata)
1941 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1942 struct qedr_pd *pd = get_qedr_pd(ibpd);
1947 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1948 udata ? "user library" : "kernel", pd);
1950 rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
1954 DP_DEBUG(dev, QEDR_MSG_QP,
1955 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1956 udata ? "user library" : "kernel", attrs->event_handler, pd,
1957 get_qedr_cq(attrs->send_cq),
1958 get_qedr_cq(attrs->send_cq)->icid,
1959 get_qedr_cq(attrs->recv_cq),
1960 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
1962 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1964 DP_ERR(dev, "create qp: failed allocating memory\n");
1965 return ERR_PTR(-ENOMEM);
1968 qedr_set_common_qp_params(dev, qp, pd, attrs);
1970 if (attrs->qp_type == IB_QPT_GSI) {
1971 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1978 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1980 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
1985 qp->ibqp.qp_num = qp->qp_id;
1987 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1988 rc = qedr_idr_add(dev, &dev->qpidr, qp, qp->qp_id);
1998 return ERR_PTR(-EFAULT);
2001 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2004 case QED_ROCE_QP_STATE_RESET:
2005 return IB_QPS_RESET;
2006 case QED_ROCE_QP_STATE_INIT:
2008 case QED_ROCE_QP_STATE_RTR:
2010 case QED_ROCE_QP_STATE_RTS:
2012 case QED_ROCE_QP_STATE_SQD:
2014 case QED_ROCE_QP_STATE_ERR:
2016 case QED_ROCE_QP_STATE_SQE:
2022 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2023 enum ib_qp_state qp_state)
2027 return QED_ROCE_QP_STATE_RESET;
2029 return QED_ROCE_QP_STATE_INIT;
2031 return QED_ROCE_QP_STATE_RTR;
2033 return QED_ROCE_QP_STATE_RTS;
2035 return QED_ROCE_QP_STATE_SQD;
2037 return QED_ROCE_QP_STATE_ERR;
2039 return QED_ROCE_QP_STATE_ERR;
2043 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
2045 qed_chain_reset(&qph->pbl);
2049 qph->db_data.data.value = cpu_to_le16(0);
2052 static int qedr_update_qp_state(struct qedr_dev *dev,
2054 enum qed_roce_qp_state cur_state,
2055 enum qed_roce_qp_state new_state)
2059 if (new_state == cur_state)
2062 switch (cur_state) {
2063 case QED_ROCE_QP_STATE_RESET:
2064 switch (new_state) {
2065 case QED_ROCE_QP_STATE_INIT:
2066 qp->prev_wqe_size = 0;
2067 qedr_reset_qp_hwq_info(&qp->sq);
2068 qedr_reset_qp_hwq_info(&qp->rq);
2075 case QED_ROCE_QP_STATE_INIT:
2076 switch (new_state) {
2077 case QED_ROCE_QP_STATE_RTR:
2078 /* Update doorbell (in case post_recv was
2079 * done before move to RTR)
2080 */
2082 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2083 writel(qp->rq.db_data.raw, qp->rq.db);
2086 case QED_ROCE_QP_STATE_ERR:
2089 /* Invalid state change. */
2094 case QED_ROCE_QP_STATE_RTR:
2096 switch (new_state) {
2097 case QED_ROCE_QP_STATE_RTS:
2099 case QED_ROCE_QP_STATE_ERR:
2102 /* Invalid state change. */
2107 case QED_ROCE_QP_STATE_RTS:
2109 switch (new_state) {
2110 case QED_ROCE_QP_STATE_SQD:
2112 case QED_ROCE_QP_STATE_ERR:
2115 /* Invalid state change. */
2120 case QED_ROCE_QP_STATE_SQD:
2122 switch (new_state) {
2123 case QED_ROCE_QP_STATE_RTS:
2124 case QED_ROCE_QP_STATE_ERR:
2127 /* Invalid state change. */
2132 case QED_ROCE_QP_STATE_ERR:
2134 switch (new_state) {
2135 case QED_ROCE_QP_STATE_RESET:
2136 if ((qp->rq.prod != qp->rq.cons) ||
2137 (qp->sq.prod != qp->sq.cons)) {
2139 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2140 qp->rq.prod, qp->rq.cons, qp->sq.prod,
2158 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2159 int attr_mask, struct ib_udata *udata)
2161 struct qedr_qp *qp = get_qedr_qp(ibqp);
2162 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2163 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2164 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2165 enum ib_qp_state old_qp_state, new_qp_state;
2166 enum qed_roce_qp_state cur_state;
2169 DP_DEBUG(dev, QEDR_MSG_QP,
2170 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2173 old_qp_state = qedr_get_ibqp_state(qp->state);
2174 if (attr_mask & IB_QP_STATE)
2175 new_qp_state = attr->qp_state;
2177 new_qp_state = old_qp_state;
2179 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2180 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2181 ibqp->qp_type, attr_mask)) {
2183 "modify qp: invalid attribute mask=0x%x specified for\n"
2184 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2185 attr_mask, qp->qp_id, ibqp->qp_type,
2186 old_qp_state, new_qp_state);
2192 /* Translate the masks... */
2193 if (attr_mask & IB_QP_STATE) {
2194 SET_FIELD(qp_params.modify_flags,
2195 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2196 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2199 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2200 qp_params.sqd_async = true;
2202 if (attr_mask & IB_QP_PKEY_INDEX) {
2203 SET_FIELD(qp_params.modify_flags,
2204 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2205 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2210 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2213 if (attr_mask & IB_QP_QKEY)
2214 qp->qkey = attr->qkey;
2216 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2217 SET_FIELD(qp_params.modify_flags,
2218 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2219 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2220 IB_ACCESS_REMOTE_READ;
2221 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2222 IB_ACCESS_REMOTE_WRITE;
2223 qp_params.incoming_atomic_en = attr->qp_access_flags &
2224 IB_ACCESS_REMOTE_ATOMIC;
2227 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2228 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2231 if (attr_mask & IB_QP_PATH_MTU) {
2232 if (attr->path_mtu < IB_MTU_256 ||
2233 attr->path_mtu > IB_MTU_4096) {
2234 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2238 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2239 ib_mtu_enum_to_int(iboe_get_mtu
2245 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2246 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2249 SET_FIELD(qp_params.modify_flags,
2250 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2252 qp_params.traffic_class_tos = grh->traffic_class;
2253 qp_params.flow_label = grh->flow_label;
2254 qp_params.hop_limit_ttl = grh->hop_limit;
2256 qp->sgid_idx = grh->sgid_index;
2258 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2261 "modify qp: problems with GID index %d (rc=%d)\n",
2262 grh->sgid_index, rc);
2266 rc = qedr_get_dmac(dev, &attr->ah_attr,
2267 qp_params.remote_mac_addr);
2271 qp_params.use_local_mac = true;
2272 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2274 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2275 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2276 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2277 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2278 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2279 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2280 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2281 qp_params.remote_mac_addr);
2283 qp_params.mtu = qp->mtu;
2284 qp_params.lb_indication = false;
2287 if (!qp_params.mtu) {
2288 /* Stay with current MTU */
2290 qp_params.mtu = qp->mtu;
2293 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2296 if (attr_mask & IB_QP_TIMEOUT) {
2297 SET_FIELD(qp_params.modify_flags,
2298 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2300 /* The received timeout value is an exponent used like this:
2301 * "12.7.34 LOCAL ACK TIMEOUT
2302 * Value representing the transport (ACK) timeout for use by
2303 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2304 * The FW expects timeout in msec so we need to divide the usec
2305 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2306 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2307 * The value of zero means infinite so we use a 'max_t' to make
2308 * sure that sub 1 msec values will be configured as 1 msec.
2309 */
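/* For example, attr->timeout = 14 means 4.096 usec * 2^14 ~= 67 msec; the
 * approximation above programs 2^(14 - 8) = 64 msec.
 */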
2311 qp_params.ack_timeout =
2312 1 << max_t(int, attr->timeout - 8, 0);
2314 qp_params.ack_timeout = 0;
2317 if (attr_mask & IB_QP_RETRY_CNT) {
2318 SET_FIELD(qp_params.modify_flags,
2319 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2320 qp_params.retry_cnt = attr->retry_cnt;
2323 if (attr_mask & IB_QP_RNR_RETRY) {
2324 SET_FIELD(qp_params.modify_flags,
2325 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2326 qp_params.rnr_retry_cnt = attr->rnr_retry;
2329 if (attr_mask & IB_QP_RQ_PSN) {
2330 SET_FIELD(qp_params.modify_flags,
2331 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2332 qp_params.rq_psn = attr->rq_psn;
2333 qp->rq_psn = attr->rq_psn;
2336 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2337 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2340 "unsupported max_rd_atomic=%d, supported=%d\n",
2341 attr->max_rd_atomic,
2342 dev->attr.max_qp_req_rd_atomic_resc);
2346 SET_FIELD(qp_params.modify_flags,
2347 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2348 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2351 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2352 SET_FIELD(qp_params.modify_flags,
2353 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2354 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2357 if (attr_mask & IB_QP_SQ_PSN) {
2358 SET_FIELD(qp_params.modify_flags,
2359 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2360 qp_params.sq_psn = attr->sq_psn;
2361 qp->sq_psn = attr->sq_psn;
2364 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2365 if (attr->max_dest_rd_atomic >
2366 dev->attr.max_qp_resp_rd_atomic_resc) {
2368 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2369 attr->max_dest_rd_atomic,
2370 dev->attr.max_qp_resp_rd_atomic_resc);
2376 SET_FIELD(qp_params.modify_flags,
2377 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2378 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2381 if (attr_mask & IB_QP_DEST_QPN) {
2382 SET_FIELD(qp_params.modify_flags,
2383 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2385 qp_params.dest_qp = attr->dest_qp_num;
2386 qp->dest_qp_num = attr->dest_qp_num;
2389 cur_state = qp->state;
2391 /* Update the QP state before the actual ramrod to prevent a race with
2392 * fast path. Modifying the QP state to error will cause the device to
2393 * flush the CQEs; if the flushed CQEs are then polled while the QP is not
2394 * yet in the error state, that would be treated as a potential issue.
2396 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2397 !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2398 qp->state = QED_ROCE_QP_STATE_ERR;
2400 if (qp->qp_type != IB_QPT_GSI)
2401 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2402 qp->qed_qp, &qp_params);
2404 if (attr_mask & IB_QP_STATE) {
2405 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2406 rc = qedr_update_qp_state(dev, qp, cur_state,
2407 qp_params.new_state);
2408 qp->state = qp_params.new_state;
2415 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2417 int ib_qp_acc_flags = 0;
2419 if (params->incoming_rdma_write_en)
2420 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2421 if (params->incoming_rdma_read_en)
2422 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2423 if (params->incoming_atomic_en)
2424 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2425 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2426 return ib_qp_acc_flags;
2429 int qedr_query_qp(struct ib_qp *ibqp,
2430 struct ib_qp_attr *qp_attr,
2431 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2433 struct qed_rdma_query_qp_out_params params;
2434 struct qedr_qp *qp = get_qedr_qp(ibqp);
2435 struct qedr_dev *dev = qp->dev;
2438 memset(&params, 0, sizeof(params));
2440 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2444 memset(qp_attr, 0, sizeof(*qp_attr));
2445 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2447 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2448 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2449 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2450 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2451 qp_attr->rq_psn = params.rq_psn;
2452 qp_attr->sq_psn = params.sq_psn;
2453 qp_attr->dest_qp_num = params.dest_qp;
2455 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2457 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2458 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2459 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2460 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2461 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2462 qp_init_attr->cap = qp_attr->cap;
2464 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2465 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2466 params.flow_label, qp->sgid_idx,
2467 params.hop_limit_ttl, params.traffic_class_tos);
2468 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2469 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2470 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2471 qp_attr->timeout = params.timeout;
2472 qp_attr->rnr_retry = params.rnr_retry;
2473 qp_attr->retry_cnt = params.retry_cnt;
2474 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2475 qp_attr->pkey_index = params.pkey_index;
2476 qp_attr->port_num = 1;
2477 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2478 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2479 qp_attr->alt_pkey_index = 0;
2480 qp_attr->alt_port_num = 0;
2481 qp_attr->alt_timeout = 0;
2482 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2484 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2485 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2486 qp_attr->max_rd_atomic = params.max_rd_atomic;
2487 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2489 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2490 qp_attr->cap.max_inline_data);
2496 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2500 if (qp->qp_type != IB_QPT_GSI) {
2501 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2506 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2507 qedr_cleanup_user(dev, qp);
2509 qedr_cleanup_kernel(dev, qp);
2514 int qedr_destroy_qp(struct ib_qp *ibqp)
2516 struct qedr_qp *qp = get_qedr_qp(ibqp);
2517 struct qedr_dev *dev = qp->dev;
2518 struct ib_qp_attr attr;
2522 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2525 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2526 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2527 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2528 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2530 attr.qp_state = IB_QPS_ERR;
2531 attr_mask |= IB_QP_STATE;
2533 /* Change the QP state to ERROR */
2534 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2537 /* Wait for the connect/accept to complete */
2541 while (qp->ep->during_connect) {
2542 DP_DEBUG(dev, QEDR_MSG_QP,
2543 "Still in during connect/accept\n");
2546 if (wait_count++ > 200) {
2548 "during connect timeout\n");
2555 if (qp->qp_type == IB_QPT_GSI)
2556 qedr_destroy_gsi_qp(dev);
2558 qedr_free_qp_resources(dev, qp);
2560 if (atomic_dec_and_test(&qp->refcnt) &&
2561 rdma_protocol_iwarp(&dev->ibdev, 1)) {
2562 qedr_idr_remove(dev, &dev->qpidr, qp->qp_id);
2568 struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
2569 u32 flags, struct ib_udata *udata)
2573 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2575 return ERR_PTR(-ENOMEM);
2577 rdma_copy_ah_attr(&ah->attr, attr);
2582 int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2584 struct qedr_ah *ah = get_qedr_ah(ibah);
2586 rdma_destroy_ah_attr(&ah->attr);
2591 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2593 struct qedr_pbl *pbl, *tmp;
2595 if (info->pbl_table)
2596 list_add_tail(&info->pbl_table->list_entry,
2597 &info->free_pbl_list);
2599 if (!list_empty(&info->inuse_pbl_list))
2600 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2602 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2603 list_del(&pbl->list_entry);
2604 qedr_free_pbl(dev, &info->pbl_info, pbl);
2608 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2609 size_t page_list_len, bool two_layered)
2611 struct qedr_pbl *tmp;
2614 INIT_LIST_HEAD(&info->free_pbl_list);
2615 INIT_LIST_HEAD(&info->inuse_pbl_list);
2617 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2618 page_list_len, two_layered);
2622 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2623 if (IS_ERR(info->pbl_table)) {
2624 rc = PTR_ERR(info->pbl_table);
2628 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2629 &info->pbl_table->pa);
2631 /* In the usual case we use 2 PBLs, so add one to the free
2632 * list and allocate another one
2634 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2636 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2640 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2642 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2646 free_mr_info(dev, info);
2651 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2652 u64 usr_addr, int acc, struct ib_udata *udata)
2654 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2659 pd = get_qedr_pd(ibpd);
2660 DP_DEBUG(dev, QEDR_MSG_MR,
2661 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2662 pd->pd_id, start, len, usr_addr, acc);
2664 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2665 return ERR_PTR(-EINVAL);
2667 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2671 mr->type = QEDR_MR_USER;
2673 mr->umem = ib_umem_get(udata, start, len, acc, 0);
2674 if (IS_ERR(mr->umem)) {
2679 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2683 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2684 &mr->info.pbl_info, PAGE_SHIFT);
2686 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2688 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2692 /* Index only, 18 bit long, lkey = itid << 8 | key */
2693 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2695 mr->hw_mr.pd = pd->pd_id;
2696 mr->hw_mr.local_read = 1;
2697 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2698 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2699 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2700 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2701 mr->hw_mr.mw_bind = false;
2702 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2703 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2704 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2705 mr->hw_mr.page_size_log = PAGE_SHIFT;
2706 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2707 mr->hw_mr.length = len;
2708 mr->hw_mr.vaddr = usr_addr;
2709 mr->hw_mr.zbva = false;
2710 mr->hw_mr.phy_mr = false;
2711 mr->hw_mr.dma_mr = false;
2713 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2715 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2719 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
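/* Expose an rkey (same value as the lkey) only when some form of remote access was requested. */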
2720 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2721 mr->hw_mr.remote_atomic)
2722 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2724 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2729 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2731 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2737 int qedr_dereg_mr(struct ib_mr *ib_mr)
2739 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2740 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2743 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2747 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2749 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2750 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2752 /* it could be user registered memory. */
2754 ib_umem_release(mr->umem);
2761 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2762 int max_page_list_len)
2764 struct qedr_pd *pd = get_qedr_pd(ibpd);
2765 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2769 DP_DEBUG(dev, QEDR_MSG_MR,
2770 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2773 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2778 mr->type = QEDR_MR_FRMR;
2780 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2784 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2786 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2790 /* Index only, 18 bit long, lkey = itid << 8 | key */
2791 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2793 mr->hw_mr.pd = pd->pd_id;
2794 mr->hw_mr.local_read = 1;
2795 mr->hw_mr.local_write = 0;
2796 mr->hw_mr.remote_read = 0;
2797 mr->hw_mr.remote_write = 0;
2798 mr->hw_mr.remote_atomic = 0;
2799 mr->hw_mr.mw_bind = false;
2800 mr->hw_mr.pbl_ptr = 0;
2801 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2802 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2804 mr->hw_mr.length = 0;
2805 mr->hw_mr.vaddr = 0;
2806 mr->hw_mr.zbva = false;
2807 mr->hw_mr.phy_mr = true;
2808 mr->hw_mr.dma_mr = false;
2810 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2812 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2816 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2817 mr->ibmr.rkey = mr->ibmr.lkey;
2819 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2823 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2829 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2830 enum ib_mr_type mr_type, u32 max_num_sg)
2834 if (mr_type != IB_MR_TYPE_MEM_REG)
2835 return ERR_PTR(-EINVAL);
2837 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2840 return ERR_PTR(-EINVAL);
2845 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2847 struct qedr_mr *mr = get_qedr_mr(ibmr);
2848 struct qedr_pbl *pbl_table;
2849 struct regpair *pbe;
2852 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2853 DP_ERR(mr->dev, "qedr_set_page failed, PBL is full (npages=%d)\n", mr->npages);
2857 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2860 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
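/* Locate the PBL page that holds this entry, then the slot within that page. */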
2861 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2862 pbe = (struct regpair *)pbl_table->va;
2863 pbe += mr->npages % pbes_in_page;
2864 pbe->lo = cpu_to_le32((u32)addr);
2865 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2872 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2874 int work = info->completed - info->completed_handled - 1;
2876 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2877 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2878 struct qedr_pbl *pbl;
2880 /* Free all the page lists that can be freed
2881 * (all the ones that were invalidated), under the assumption
2882 * that if an FMR completed successfully, then any invalidate
2883 * operation issued before it has also completed
2885 pbl = list_first_entry(&info->inuse_pbl_list,
2886 struct qedr_pbl, list_entry);
2887 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
2888 info->completed_handled++;
2892 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2893 int sg_nents, unsigned int *sg_offset)
2895 struct qedr_mr *mr = get_qedr_mr(ibmr);
2899 handle_completed_mrs(mr->dev, &mr->info);
2900 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2903 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2905 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2906 struct qedr_pd *pd = get_qedr_pd(ibpd);
2910 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2912 return ERR_PTR(-ENOMEM);
2914 mr->type = QEDR_MR_DMA;
2916 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2918 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2922 /* index only, 18 bit long, lkey = itid << 8 | key */
2923 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2924 mr->hw_mr.pd = pd->pd_id;
2925 mr->hw_mr.local_read = 1;
2926 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2927 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2928 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2929 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2930 mr->hw_mr.dma_mr = true;
2932 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2934 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2938 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2939 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2940 mr->hw_mr.remote_atomic)
2941 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2943 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2947 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2953 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
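/* The ring keeps one slot unused, so (prod + 1) == cons means the WQ is full. */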
2955 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2958 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2962 for (i = 0; i < num_sge; i++)
2963 len += sg_list[i].length;
2968 static void swap_wqe_data64(u64 *p)
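/* Byte-swap each 64-bit word of the WQE element; cpu_to_be64(cpu_to_le64()) amounts to an unconditional swab64() on both little- and big-endian hosts. */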
2972 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2973 *p = cpu_to_be64(cpu_to_le64(*p));
2976 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2977 struct qedr_qp *qp, u8 *wqe_size,
2978 const struct ib_send_wr *wr,
2979 const struct ib_send_wr **bad_wr,
2982 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2983 char *seg_prt, *wqe;
2986 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2987 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3001 /* Copy data inline */
3002 for (i = 0; i < wr->num_sge; i++) {
3003 u32 len = wr->sg_list[i].length;
3004 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3009 /* New segment required */
3011 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3013 seg_siz = sizeof(struct rdma_sq_common_wqe);
3017 /* Calculate currently allowed length */
3018 cur = min_t(u32, len, seg_siz);
3019 memcpy(seg_prt, src, cur);
3021 /* Update segment variables */
3025 /* Update sge variables */
3029 /* Swap fully-completed segments */
3031 swap_wqe_data64((u64 *)wqe);
3035 /* Swap the last, not fully completed, segment */
3037 swap_wqe_data64((u64 *)wqe);
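/* Helpers for building RQ/SRQ SGEs and the SRQ WQE header in the little-endian layout expected by the HW. */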
3042 #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
3044 DMA_REGPAIR_LE(sge->addr, vaddr); \
3045 (sge)->length = cpu_to_le32(vlength); \
3046 (sge)->flags = cpu_to_le32(vflags); \
3049 #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
3051 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
3052 (hdr)->num_sges = num_sge; \
3055 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
3057 DMA_REGPAIR_LE(sge->addr, vaddr); \
3058 (sge)->length = cpu_to_le32(vlength); \
3059 (sge)->l_key = cpu_to_le32(vlkey); \
3062 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3063 const struct ib_send_wr *wr)
3068 for (i = 0; i < wr->num_sge; i++) {
3069 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3071 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3072 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3073 sge->length = cpu_to_le32(wr->sg_list[i].length);
3074 data_size += wr->sg_list[i].length;
3078 *wqe_size += wr->num_sge;
3083 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3085 struct rdma_sq_rdma_wqe_1st *rwqe,
3086 struct rdma_sq_rdma_wqe_2nd *rwqe2,
3087 const struct ib_send_wr *wr,
3088 const struct ib_send_wr **bad_wr)
3090 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3091 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3093 if (wr->send_flags & IB_SEND_INLINE &&
3094 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3095 wr->opcode == IB_WR_RDMA_WRITE)) {
3098 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3099 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3100 bad_wr, &rwqe->flags, flags);
3103 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3106 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3108 struct rdma_sq_send_wqe_1st *swqe,
3109 struct rdma_sq_send_wqe_2st *swqe2,
3110 const struct ib_send_wr *wr,
3111 const struct ib_send_wr **bad_wr)
3113 memset(swqe2, 0, sizeof(*swqe2));
3114 if (wr->send_flags & IB_SEND_INLINE) {
3117 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3118 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3119 bad_wr, &swqe->flags, flags);
3122 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3125 static int qedr_prepare_reg(struct qedr_qp *qp,
3126 struct rdma_sq_fmr_wqe_1st *fwqe1,
3127 const struct ib_reg_wr *wr)
3129 struct qedr_mr *mr = get_qedr_mr(wr->mr);
3130 struct rdma_sq_fmr_wqe_2nd *fwqe2;
3132 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3133 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3134 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3135 fwqe1->l_key = wr->key;
3137 fwqe2->access_ctrl = 0;
3139 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3140 !!(wr->access & IB_ACCESS_REMOTE_READ));
3141 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3142 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3143 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3144 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3145 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3146 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3147 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3148 fwqe2->fmr_ctrl = 0;
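/* The page size is programmed as log2(page_size) relative to 4K pages, hence the '- 12' below. */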
3150 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3151 ilog2(mr->ibmr.page_size) - 12);
3153 fwqe2->length_hi = 0;
3154 fwqe2->length_lo = mr->ibmr.length;
3155 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3156 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3158 qp->wqe_wr_id[qp->sq.prod].mr = mr;
3163 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3166 case IB_WR_RDMA_WRITE:
3167 case IB_WR_RDMA_WRITE_WITH_IMM:
3168 return IB_WC_RDMA_WRITE;
3169 case IB_WR_SEND_WITH_IMM:
3171 case IB_WR_SEND_WITH_INV:
3173 case IB_WR_RDMA_READ:
3174 case IB_WR_RDMA_READ_WITH_INV:
3175 return IB_WC_RDMA_READ;
3176 case IB_WR_ATOMIC_CMP_AND_SWP:
3177 return IB_WC_COMP_SWAP;
3178 case IB_WR_ATOMIC_FETCH_AND_ADD:
3179 return IB_WC_FETCH_ADD;
3181 return IB_WC_REG_MR;
3182 case IB_WR_LOCAL_INV:
3183 return IB_WC_LOCAL_INV;
3189 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3190 const struct ib_send_wr *wr)
3192 int wq_is_full, err_wr, pbl_is_full;
3193 struct qedr_dev *dev = qp->dev;
3195 /* prevent SQ overflow and/or processing of a bad WR */
3196 err_wr = wr->num_sge > qp->sq.max_sges;
3197 wq_is_full = qedr_wq_is_full(&qp->sq);
3198 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3199 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3200 if (wq_is_full || err_wr || pbl_is_full) {
3201 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3203 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3205 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3208 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3210 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3212 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3216 if (pbl_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3218 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3220 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3227 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3228 const struct ib_send_wr **bad_wr)
3230 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3231 struct qedr_qp *qp = get_qedr_qp(ibqp);
3232 struct rdma_sq_atomic_wqe_1st *awqe1;
3233 struct rdma_sq_atomic_wqe_2nd *awqe2;
3234 struct rdma_sq_atomic_wqe_3rd *awqe3;
3235 struct rdma_sq_send_wqe_2st *swqe2;
3236 struct rdma_sq_local_inv_wqe *iwqe;
3237 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3238 struct rdma_sq_send_wqe_1st *swqe;
3239 struct rdma_sq_rdma_wqe_1st *rwqe;
3240 struct rdma_sq_fmr_wqe_1st *fwqe1;
3241 struct rdma_sq_common_wqe *wqe;
3246 if (!qedr_can_post_send(qp, wr)) {
3251 wqe = qed_chain_produce(&qp->sq.pbl);
3252 qp->wqe_wr_id[qp->sq.prod].signaled =
3253 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3256 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3257 !!(wr->send_flags & IB_SEND_SOLICITED));
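/* Request a CQE either when this WR is explicitly signaled or when the QP signals every WR (qp->signaled). */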
3258 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3259 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3260 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3261 !!(wr->send_flags & IB_SEND_FENCE));
3262 wqe->prev_wqe_size = qp->prev_wqe_size;
3264 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
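/* Record the WC opcode per WQE so qedr_poll_cq() can report it when the completion arrives. */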
3266 switch (wr->opcode) {
3267 case IB_WR_SEND_WITH_IMM:
3268 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3273 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3274 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3276 swqe2 = qed_chain_produce(&qp->sq.pbl);
3278 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3279 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3281 swqe->length = cpu_to_le32(length);
3282 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3283 qp->prev_wqe_size = swqe->wqe_size;
3284 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3287 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3288 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3291 swqe2 = qed_chain_produce(&qp->sq.pbl);
3292 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3294 swqe->length = cpu_to_le32(length);
3295 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3296 qp->prev_wqe_size = swqe->wqe_size;
3297 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3299 case IB_WR_SEND_WITH_INV:
3300 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3301 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3302 swqe2 = qed_chain_produce(&qp->sq.pbl);
3304 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3305 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3307 swqe->length = cpu_to_le32(length);
3308 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3309 qp->prev_wqe_size = swqe->wqe_size;
3310 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3313 case IB_WR_RDMA_WRITE_WITH_IMM:
3314 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3319 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3320 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3323 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3324 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3325 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3327 rwqe->length = cpu_to_le32(length);
3328 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3329 qp->prev_wqe_size = rwqe->wqe_size;
3330 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3332 case IB_WR_RDMA_WRITE:
3333 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3334 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3337 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3338 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3340 rwqe->length = cpu_to_le32(length);
3341 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3342 qp->prev_wqe_size = rwqe->wqe_size;
3343 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3345 case IB_WR_RDMA_READ_WITH_INV:
3346 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3347 /* fallthrough -- handled identically to RDMA READ */
3349 case IB_WR_RDMA_READ:
3350 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3351 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3354 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3355 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3357 rwqe->length = cpu_to_le32(length);
3358 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3359 qp->prev_wqe_size = rwqe->wqe_size;
3360 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3363 case IB_WR_ATOMIC_CMP_AND_SWP:
3364 case IB_WR_ATOMIC_FETCH_AND_ADD:
3365 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3366 awqe1->wqe_size = 4;
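/* An atomic WQE occupies 4 chain elements: the three WQE segments produced here plus one data SGE. */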
3368 awqe2 = qed_chain_produce(&qp->sq.pbl);
3369 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3370 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3372 awqe3 = qed_chain_produce(&qp->sq.pbl);
3374 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3375 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3376 DMA_REGPAIR_LE(awqe3->swap_data,
3377 atomic_wr(wr)->compare_add);
3379 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3380 DMA_REGPAIR_LE(awqe3->swap_data,
3381 atomic_wr(wr)->swap);
3382 DMA_REGPAIR_LE(awqe3->cmp_data,
3383 atomic_wr(wr)->compare_add);
3386 qedr_prepare_sq_sges(qp, NULL, wr);
3388 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3389 qp->prev_wqe_size = awqe1->wqe_size;
3392 case IB_WR_LOCAL_INV:
3393 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3396 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3397 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3398 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3399 qp->prev_wqe_size = iwqe->wqe_size;
3402 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3403 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3404 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3405 fwqe1->wqe_size = 2;
3407 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3409 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3414 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3415 qp->prev_wqe_size = fwqe1->wqe_size;
3418 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3427 /* Restore prod to its position before
3428 * this WR was processed
3430 value = le16_to_cpu(qp->sq.db_data.data.value);
3431 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3433 /* Restore prev_wqe_size */
3434 qp->prev_wqe_size = wqe->prev_wqe_size;
3436 DP_ERR(dev, "POST SEND FAILED\n");
3442 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3443 const struct ib_send_wr **bad_wr)
3445 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3446 struct qedr_qp *qp = get_qedr_qp(ibqp);
3447 unsigned long flags;
3452 if (qp->qp_type == IB_QPT_GSI)
3453 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3455 spin_lock_irqsave(&qp->q_lock, flags);
3457 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3458 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3459 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3460 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3461 spin_unlock_irqrestore(&qp->q_lock, flags);
3463 DP_DEBUG(dev, QEDR_MSG_CQ,
3464 "QP in wrong state! QP icid=0x%x state %d\n",
3465 qp->icid, qp->state);
3471 rc = __qedr_post_send(ibqp, wr, bad_wr);
3475 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3477 qedr_inc_sw_prod(&qp->sq);
3479 qp->sq.db_data.data.value++;
3485 * If there was a failure in the first WR then it will be triggered in
3486 * vain. However this is not harmful (as long as the producer value is
3487 * unchanged). For performance reasons we avoid checking for this
3488 * redundant doorbell.
3490 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3491 * soon as we give the doorbell, we could get a completion
3492 * for this wr, therefore we need to make sure that the
3493 * memory is updated before giving the doorbell.
3494 * During qedr_poll_cq, rmb is called before accessing the
3495 * cqe. This covers for the smp_rmb as well.
3497 smp_wmb();
3498 writel(qp->sq.db_data.raw, qp->sq.db);
3500 spin_unlock_irqrestore(&qp->q_lock, flags);
3505 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3509 /* Calculate the number of elements in use from the producer
3510 * and consumer counts, then subtract it from the maximum number of
3511 * work requests supported to get the number of elements left.
3513 used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
3515 return hw_srq->max_wr - used;
3518 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3519 const struct ib_recv_wr **bad_wr)
3521 struct qedr_srq *srq = get_qedr_srq(ibsrq);
3522 struct qedr_srq_hwq_info *hw_srq;
3523 struct qedr_dev *dev = srq->dev;
3524 struct qed_chain *pbl;
3525 unsigned long flags;
3530 spin_lock_irqsave(&srq->lock, flags);
3532 hw_srq = &srq->hw_srq;
3533 pbl = &srq->hw_srq.pbl;
3535 struct rdma_srq_wqe_header *hdr;
3538 if (!qedr_srq_elem_left(hw_srq) ||
3539 wr->num_sge > srq->hw_srq.max_sges) {
3540 DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
3541 hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
3542 wr->num_sge, srq->hw_srq.max_sges);
3548 hdr = qed_chain_produce(pbl);
3549 num_sge = wr->num_sge;
3550 /* Set number of sge and work request id in header */
3551 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3553 srq->hw_srq.wr_prod_cnt++;
3557 DP_DEBUG(dev, QEDR_MSG_SRQ,
3558 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3559 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3561 for (i = 0; i < wr->num_sge; i++) {
3562 struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3564 /* Set SGE length, lkey and address */
3565 SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3566 wr->sg_list[i].length, wr->sg_list[i].lkey);
3568 DP_DEBUG(dev, QEDR_MSG_SRQ,
3569 "[%d]: len %d key %x addr %x:%x\n",
3570 i, srq_sge->length, srq_sge->l_key,
3571 srq_sge->addr.hi, srq_sge->addr.lo);
3575 /* Flush WQE and SGE information before
3576 * updating producer.
3580 /* SRQ producer is 8 bytes. Need to update SGE producer index
3581 * in the first 4 bytes and the WQE producer index in the next 4 bytes.
3584 *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
3585 offset = offsetof(struct rdma_srq_producers, wqe_prod);
3586 *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) = hw_srq->wqe_prod;
3589 /* Flush producer after updating it. */
3594 DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3595 qed_chain_get_elem_left(pbl));
3596 spin_unlock_irqrestore(&srq->lock, flags);
3601 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3602 const struct ib_recv_wr **bad_wr)
3604 struct qedr_qp *qp = get_qedr_qp(ibqp);
3605 struct qedr_dev *dev = qp->dev;
3606 unsigned long flags;
3609 if (qp->qp_type == IB_QPT_GSI)
3610 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3612 spin_lock_irqsave(&qp->q_lock, flags);
3614 if (qp->state == QED_ROCE_QP_STATE_RESET) {
3615 spin_unlock_irqrestore(&qp->q_lock, flags);
3623 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3624 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3625 wr->num_sge > qp->rq.max_sges) {
3626 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3627 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3628 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3634 for (i = 0; i < wr->num_sge; i++) {
3636 struct rdma_rq_sge *rqe =
3637 qed_chain_produce(&qp->rq.pbl);
3639 /* First one must include the number
3640 * of SGE in the list
3643 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3646 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3647 wr->sg_list[i].lkey);
3649 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3650 wr->sg_list[i].length, flags);
3653 /* Special case of no SGEs. FW requires between 1-4 SGEs,
3654 * so in this case we need to post 1 SGE with length zero. This is
3655 * because an RDMA write with immediate consumes an RQ entry.
3659 struct rdma_rq_sge *rqe =
3660 qed_chain_produce(&qp->rq.pbl);
3662 /* First one must include the number
3663 * of SGE in the list
3665 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3666 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3668 RQ_SGE_SET(rqe, 0, 0, flags);
3672 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3673 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3675 qedr_inc_sw_prod(&qp->rq);
3677 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3678 * soon as we give the doorbell, we could get a completion
3679 * for this wr, therefore we need to make sure that the
3680 * memory is updated before giving the doorbell.
3681 * During qedr_poll_cq, rmb is called before accessing the
3682 * cqe. This covers for the smp_rmb as well.
3684 smp_wmb();
3686 qp->rq.db_data.data.value++;
3688 writel(qp->rq.db_data.raw, qp->rq.db);
3690 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3691 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3697 spin_unlock_irqrestore(&qp->q_lock, flags);
3702 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
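/* A CQE still needs processing when its toggle bit matches the CQ's current toggle phase. */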
3704 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3706 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) == cq->pbl_toggle;
3710 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3712 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3715 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3716 resp_cqe->qp_handle.lo,
3721 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3723 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3725 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3728 /* Return latest CQE (needs processing) */
3729 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3731 return cq->latest_cqe;
3734 /* For FMR we need to increase the completed counter that the FMR
3735 * algorithm uses to determine whether a PBL can be freed or not.
3736 * This must be done whether or not the work request was signaled. For
3737 * this purpose we call this function from the condition that checks if a WR
3738 * should be skipped, to make sure we don't miss it (possibly this FMR
3739 * operation was not signaled)
3741 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3743 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3744 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3747 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3748 struct qedr_cq *cq, int num_entries,
3749 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3754 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3755 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3756 qedr_chk_if_fmr(qp);
3762 wc->status = status;
3765 wc->src_qp = qp->id;
3768 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3769 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3771 switch (wc->opcode) {
3772 case IB_WC_RDMA_WRITE:
3773 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3775 case IB_WC_COMP_SWAP:
3776 case IB_WC_FETCH_ADD:
3780 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3782 case IB_WC_RDMA_READ:
3784 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3794 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3795 qed_chain_consume(&qp->sq.pbl);
3796 qedr_inc_sw_cons(&qp->sq);
3802 static int qedr_poll_cq_req(struct qedr_dev *dev,
3803 struct qedr_qp *qp, struct qedr_cq *cq,
3804 int num_entries, struct ib_wc *wc,
3805 struct rdma_cqe_requester *req)
3809 switch (req->status) {
3810 case RDMA_CQE_REQ_STS_OK:
3811 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3814 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3815 if (qp->state != QED_ROCE_QP_STATE_ERR)
3816 DP_DEBUG(dev, QEDR_MSG_CQ,
3817 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3818 cq->icid, qp->icid);
3819 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3820 IB_WC_WR_FLUSH_ERR, 1);
3823 /* process all WQEs before the consumer */
3824 qp->state = QED_ROCE_QP_STATE_ERR;
3825 cnt = process_req(dev, qp, cq, num_entries, wc,
3826 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3828 /* if we have extra WC fill it with actual error info */
3829 if (cnt < num_entries) {
3830 enum ib_wc_status wc_status;
3832 switch (req->status) {
3833 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3835 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3836 cq->icid, qp->icid);
3837 wc_status = IB_WC_BAD_RESP_ERR;
3839 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3841 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3842 cq->icid, qp->icid);
3843 wc_status = IB_WC_LOC_LEN_ERR;
3845 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3847 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3848 cq->icid, qp->icid);
3849 wc_status = IB_WC_LOC_QP_OP_ERR;
3851 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3853 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3854 cq->icid, qp->icid);
3855 wc_status = IB_WC_LOC_PROT_ERR;
3857 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3859 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3860 cq->icid, qp->icid);
3861 wc_status = IB_WC_MW_BIND_ERR;
3863 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3865 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3866 cq->icid, qp->icid);
3867 wc_status = IB_WC_REM_INV_REQ_ERR;
3869 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3871 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3872 cq->icid, qp->icid);
3873 wc_status = IB_WC_REM_ACCESS_ERR;
3875 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3877 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3878 cq->icid, qp->icid);
3879 wc_status = IB_WC_REM_OP_ERR;
3881 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3883 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3884 cq->icid, qp->icid);
3885 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3887 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3889 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3890 cq->icid, qp->icid);
3891 wc_status = IB_WC_RETRY_EXC_ERR;
3895 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3896 cq->icid, qp->icid);
3897 wc_status = IB_WC_GENERAL_ERR;
3899 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3907 static inline int qedr_cqe_resp_status_to_ib(u8 status)
3910 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3911 return IB_WC_LOC_ACCESS_ERR;
3912 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3913 return IB_WC_LOC_LEN_ERR;
3914 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3915 return IB_WC_LOC_QP_OP_ERR;
3916 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3917 return IB_WC_LOC_PROT_ERR;
3918 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3919 return IB_WC_MW_BIND_ERR;
3920 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3921 return IB_WC_REM_INV_RD_REQ_ERR;
3922 case RDMA_CQE_RESP_STS_OK:
3923 return IB_WC_SUCCESS;
3925 return IB_WC_GENERAL_ERR;
3929 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3932 wc->status = IB_WC_SUCCESS;
3933 wc->byte_len = le32_to_cpu(resp->length);
3935 if (resp->flags & QEDR_RESP_IMM) {
3936 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
3937 wc->wc_flags |= IB_WC_WITH_IMM;
3939 if (resp->flags & QEDR_RESP_RDMA)
3940 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3942 if (resp->flags & QEDR_RESP_INV)
3945 } else if (resp->flags & QEDR_RESP_INV) {
3946 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3947 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3949 if (resp->flags & QEDR_RESP_RDMA)
3952 } else if (resp->flags & QEDR_RESP_RDMA) {
3959 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3960 struct qedr_cq *cq, struct ib_wc *wc,
3961 struct rdma_cqe_responder *resp, u64 wr_id)
3963 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
3964 wc->opcode = IB_WC_RECV;
3967 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3968 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3970 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3971 cq, cq->icid, resp->flags);
3974 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3975 if (wc->status == IB_WC_GENERAL_ERR)
3977 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3978 cq, cq->icid, resp->status);
3981 /* Fill the rest of the WC */
3983 wc->src_qp = qp->id;
3988 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
3989 struct qedr_cq *cq, struct ib_wc *wc,
3990 struct rdma_cqe_responder *resp)
3992 struct qedr_srq *srq = qp->srq;
3995 wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
3996 le32_to_cpu(resp->srq_wr_id.lo), u64);
3998 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3999 wc->status = IB_WC_WR_FLUSH_ERR;
4003 wc->src_qp = qp->id;
4007 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4009 srq->hw_srq.wr_cons_cnt++;
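/* One SRQ WR consumed; qedr_srq_elem_left() uses wr_cons_cnt to return this slot to the ring. */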
4013 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4014 struct qedr_cq *cq, struct ib_wc *wc,
4015 struct rdma_cqe_responder *resp)
4017 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4019 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4021 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4022 qed_chain_consume(&qp->rq.pbl);
4023 qedr_inc_sw_cons(&qp->rq);
4028 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4029 int num_entries, struct ib_wc *wc, u16 hw_cons)
4033 while (num_entries && qp->rq.wqe_cons != hw_cons) {
4035 wc->status = IB_WC_WR_FLUSH_ERR;
4038 wc->src_qp = qp->id;
4040 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4045 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4046 qed_chain_consume(&qp->rq.pbl);
4047 qedr_inc_sw_cons(&qp->rq);
4053 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4054 struct rdma_cqe_responder *resp, int *update)
4056 if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4062 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4063 struct qedr_cq *cq, int num_entries,
4065 struct rdma_cqe_responder *resp)
4069 cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4075 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4076 struct qedr_cq *cq, int num_entries,
4077 struct ib_wc *wc, struct rdma_cqe_responder *resp,
4082 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4083 cnt = process_resp_flush(qp, cq, num_entries, wc,
4084 resp->rq_cons_or_srq_id);
4085 try_consume_resp_cqe(cq, qp, resp, update);
4087 cnt = process_resp_one(dev, qp, cq, wc, resp);
4095 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4096 struct rdma_cqe_requester *req, int *update)
4098 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4104 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4106 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4107 struct qedr_cq *cq = get_qedr_cq(ibcq);
4108 union rdma_cqe *cqe;
4109 u32 old_cons, new_cons;
4110 unsigned long flags;
4114 if (cq->destroyed) {
4116 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4121 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4122 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4124 spin_lock_irqsave(&cq->cq_lock, flags);
4125 cqe = cq->latest_cqe;
4126 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4127 while (num_entries && is_valid_cqe(cq, cqe)) {
4131 /* prevent speculative reads of any field of CQE */
4134 qp = cqe_get_qp(cqe);
4136 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4142 switch (cqe_get_type(cqe)) {
4143 case RDMA_CQE_TYPE_REQUESTER:
4144 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4146 try_consume_req_cqe(cq, qp, &cqe->req, &update);
4148 case RDMA_CQE_TYPE_RESPONDER_RQ:
4149 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4150 &cqe->resp, &update);
4152 case RDMA_CQE_TYPE_RESPONDER_SRQ:
4153 cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4157 case RDMA_CQE_TYPE_INVALID:
4159 DP_ERR(dev, "Error: invalid CQE type = %d\n",
4168 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4170 cq->cq_cons += new_cons - old_cons;
4173 /* doorbell notifies about the latest VALID entry,
4174 * but the chain already points to the next INVALID one
4176 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4178 spin_unlock_irqrestore(&cq->cq_lock, flags);
4182 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4184 const struct ib_wc *in_wc,
4185 const struct ib_grh *in_grh,
4186 const struct ib_mad_hdr *mad_hdr,
4187 size_t in_mad_size, struct ib_mad_hdr *out_mad,
4188 size_t *out_mad_size, u16 *out_mad_pkey_index)
4190 struct qedr_dev *dev = get_qedr_dev(ibdev);
4192 DP_DEBUG(dev, QEDR_MSG_GSI,
4193 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
4194 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
4195 mad_hdr->class_specific, mad_hdr->class_version,
4196 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
4197 return IB_MAD_RESULT_SUCCESS;