1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
37 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/uverbs_ioctl.h>
47 #include <linux/qed/common_hsi.h>
48 #include "qedr_hsi_rdma.h"
49 #include <linux/qed/qed_if.h>
52 #include <rdma/qedr-abi.h>
53 #include "qedr_roce_cm.h"
54 #include "qedr_iw_cm.h"
56 #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
57 #define RDMA_MAX_SGE_PER_SRQ (4)
58 #define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
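/*
 * Sizing note: RDMA_MAX_SRQ_WQE_SIZE works out to 5 chain elements per SRQ
 * WQE -- presumably one WQE header element plus up to RDMA_MAX_SGE_PER_SRQ
 * (4) SGE elements. qedr_alloc_srq_kernel_params() below sizes the chain as
 * max_wr * RDMA_MAX_SRQ_WQE_SIZE, e.g. max_wr = 1024 -> 5120 elements.
 */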
60 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
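/*
 * DB_ADDR_SHIFT() turns a PWM doorbell offset (a DQ_PWM_OFFSET_* constant)
 * into the byte offset of that doorbell within a DPI. The result is later
 * added either to a user context's DPI base or to the kernel doorbell BAR,
 * e.g.:
 *
 *	db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
 *	cq->db_addr = dev->db_addr + db_offset;
 */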
62 enum {
63 QEDR_USER_MMAP_IO_WC = 0,
64 QEDR_USER_MMAP_PHYS_PAGE,
65 };
67 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
68 size_t len)
69 {
70 size_t min_len = min_t(size_t, len, udata->outlen);

72 return ib_copy_to_udata(udata, src, min_len);
73 }
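/*
 * Copying min(len, udata->outlen) keeps the ABI backward compatible: a user
 * library built against an older, smaller response struct only receives the
 * bytes it has room for, instead of ib_copy_to_udata() failing outright.
 */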
75 int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
76 {
77 if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
78 return -EINVAL;

80 *pkey = QEDR_ROCE_PKEY_DEFAULT;

82 return 0;
83 }
84 int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
85 int index, union ib_gid *sgid)
86 {
87 struct qedr_dev *dev = get_qedr_dev(ibdev);

89 memset(sgid->raw, 0, sizeof(sgid->raw));
90 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

92 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
93 sgid->global.interface_id, sgid->global.subnet_prefix);

95 return 0;
96 }
98 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
99 {
100 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101 struct qedr_device_attr *qattr = &dev->attr;
102 struct qedr_srq *srq = get_qedr_srq(ibsrq);

104 srq_attr->srq_limit = srq->srq_limit;
105 srq_attr->max_wr = qattr->max_srq_wr;
106 srq_attr->max_sge = qattr->max_sge;

108 return 0;
109 }
111 int qedr_query_device(struct ib_device *ibdev,
112 struct ib_device_attr *attr, struct ib_udata *udata)
114 struct qedr_dev *dev = get_qedr_dev(ibdev);
115 struct qedr_device_attr *qattr = &dev->attr;
117 if (!dev->rdma_ctx) {
118 DP_ERR(dev,
119 "qedr_query_device called with invalid params rdma_ctx=%p\n",
120 dev->rdma_ctx);
121 return -EINVAL;
122 }
124 memset(attr, 0, sizeof(*attr));
126 attr->fw_ver = qattr->fw_ver;
127 attr->sys_image_guid = qattr->sys_image_guid;
128 attr->max_mr_size = qattr->max_mr_size;
129 attr->page_size_cap = qattr->page_size_caps;
130 attr->vendor_id = qattr->vendor_id;
131 attr->vendor_part_id = qattr->vendor_part_id;
132 attr->hw_ver = qattr->hw_ver;
133 attr->max_qp = qattr->max_qp;
134 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
136 IB_DEVICE_RC_RNR_NAK_GEN |
137 IB_DEVICE_MEM_MGT_EXTENSIONS;
138 attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
140 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
141 attr->device_cap_flags |= IB_DEVICE_XRC;
142 attr->max_send_sge = qattr->max_sge;
143 attr->max_recv_sge = qattr->max_sge;
144 attr->max_sge_rd = qattr->max_sge;
145 attr->max_cq = qattr->max_cq;
146 attr->max_cqe = qattr->max_cqe;
147 attr->max_mr = qattr->max_mr;
148 attr->max_mw = qattr->max_mw;
149 attr->max_pd = qattr->max_pd;
150 attr->atomic_cap = dev->atomic_cap;
151 attr->max_qp_init_rd_atom =
152 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
153 attr->max_qp_rd_atom =
154 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
155 attr->max_qp_init_rd_atom);
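/*
 * 1 << (fls(x) - 1) rounds x down to the nearest power of two (e.g.
 * x = 16 -> 16, x = 17 -> 16); the responder depth is additionally capped
 * by the initiator depth computed just above.
 */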
157 attr->max_srq = qattr->max_srq;
158 attr->max_srq_sge = qattr->max_srq_sge;
159 attr->max_srq_wr = qattr->max_srq_wr;
161 attr->local_ca_ack_delay = qattr->dev_ack_delay;
162 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
163 attr->max_pkeys = qattr->max_pkey;
164 attr->max_ah = qattr->max_ah;

166 return 0;
167 }
169 static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
170 u8 *ib_width)
171 {
172 switch (speed) {
173 case 1000:
174 *ib_speed = IB_SPEED_SDR;
175 *ib_width = IB_WIDTH_1X;
176 break;
177 case 10000:
178 *ib_speed = IB_SPEED_QDR;
179 *ib_width = IB_WIDTH_1X;
180 break;
182 case 20000:
183 *ib_speed = IB_SPEED_DDR;
184 *ib_width = IB_WIDTH_4X;
185 break;
187 case 25000:
188 *ib_speed = IB_SPEED_EDR;
189 *ib_width = IB_WIDTH_1X;
190 break;
192 case 40000:
193 *ib_speed = IB_SPEED_QDR;
194 *ib_width = IB_WIDTH_4X;
195 break;
197 case 50000:
198 *ib_speed = IB_SPEED_HDR;
199 *ib_width = IB_WIDTH_1X;
200 break;
202 case 100000:
203 *ib_speed = IB_SPEED_EDR;
204 *ib_width = IB_WIDTH_4X;
205 break;
207 default:
208 /* Unsupported */
209 *ib_speed = IB_SPEED_SDR;
210 *ib_width = IB_WIDTH_1X;
211 }
212 }
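/*
 * The switch above maps the qed link speed (presumably in Mb/s, matching
 * the case values) to an IB speed/width pair whose product matches, e.g.
 * 100000 Mb/s = 4 lanes x EDR (25 Gb/s); unknown speeds fall back to SDR x1.
 */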
214 int qedr_query_port(struct ib_device *ibdev, u32 port,
215 struct ib_port_attr *attr)
217 struct qedr_dev *dev;
218 struct qed_rdma_port *rdma_port;
220 dev = get_qedr_dev(ibdev);
222 if (!dev->rdma_ctx) {
223 DP_ERR(dev, "rdma_ctx is NULL\n");
224 return -EINVAL;
225 }
227 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
229 /* *attr being zeroed by the caller, avoid zeroing it here */
230 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
231 attr->state = IB_PORT_ACTIVE;
232 attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
233 } else {
234 attr->state = IB_PORT_DOWN;
235 attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
236 }
237 attr->max_mtu = IB_MTU_4096;
242 attr->ip_gids = true;
243 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
244 attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
245 attr->gid_tbl_len = 1;
246 } else {
247 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
248 attr->gid_tbl_len = QEDR_MAX_SGID;
249 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
250 }
251 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
252 attr->qkey_viol_cntr = 0;
253 get_link_speed_and_width(rdma_port->link_speed,
254 &attr->active_speed, &attr->active_width);
255 attr->max_msg_sz = rdma_port->max_msg_size;
256 attr->max_vl_num = 4;

258 return 0;
259 }
261 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
263 struct ib_device *ibdev = uctx->device;
265 struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
266 struct qedr_alloc_ucontext_resp uresp = {};
267 struct qedr_alloc_ucontext_req ureq = {};
268 struct qedr_dev *dev = get_qedr_dev(ibdev);
269 struct qed_rdma_add_user_out_params oparams;
270 struct qedr_user_mmap_entry *entry;
276 rc = ib_copy_from_udata(&ureq, udata,
277 min(sizeof(ureq), udata->inlen));
278 if (rc) {
279 DP_ERR(dev, "Problem copying data from user space\n");
280 return -EFAULT;
281 }
282 ctx->edpm_mode = !!(ureq.context_flags &
283 QEDR_ALLOC_UCTX_EDPM_MODE);
284 ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
287 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
288 if (rc) {
289 DP_ERR(dev,
290 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
291 rc);
292 return rc;
293 }
295 ctx->dpi = oparams.dpi;
296 ctx->dpi_addr = oparams.dpi_addr;
297 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
298 ctx->dpi_size = oparams.dpi_size;
299 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
300 if (!entry) {
301 rc = -ENOMEM;
302 goto err;
303 }
305 entry->io_address = ctx->dpi_phys_addr;
306 entry->length = ctx->dpi_size;
307 entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
308 entry->dpi = ctx->dpi;
310 rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
311 ctx->dpi_size);
312 if (rc) {
313 kfree(entry);
314 goto err;
315 }
316 ctx->db_mmap_entry = &entry->rdma_entry;
318 if (!dev->user_dpm_enabled)
319 uresp.dpm_flags = 0;
320 else if (rdma_protocol_iwarp(&dev->ibdev, 1))
321 uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
322 else
323 uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
324 QEDR_DPM_TYPE_ROCE_LEGACY |
325 QEDR_DPM_TYPE_ROCE_EDPM_MODE;
327 if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
328 uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
329 uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
330 uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
331 uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
334 uresp.wids_enabled = 1;
335 uresp.wid_count = oparams.wid_count;
336 uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
337 uresp.db_size = ctx->dpi_size;
338 uresp.max_send_wr = dev->attr.max_sqe;
339 uresp.max_recv_wr = dev->attr.max_rqe;
340 uresp.max_srq_wr = dev->attr.max_srq_wr;
341 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
342 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
343 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
344 uresp.max_cqes = QEDR_MAX_CQES;
346 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
347 if (rc)
348 goto err;

350 ctx->dev = dev;

352 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
353 &ctx->ibucontext);
354 return 0;

356 err:
357 if (!ctx->db_mmap_entry)
358 dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
359 else
360 rdma_user_mmap_entry_remove(ctx->db_mmap_entry);

362 return rc;
363 }
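/*
 * Error-path ownership note: once the mmap entry has been inserted, the DPI
 * is released through rdma_user_mmap_entry_remove() -> qedr_mmap_free()
 * (see below), so rdma_remove_user() is only called directly when the entry
 * was never created.
 */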
365 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
366 {
367 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);

369 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
370 uctx);

372 rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
373 }
375 void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
376 {
377 struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
378 struct qedr_dev *dev = entry->dev;

380 if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
381 free_page((unsigned long)entry->address);
382 else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
383 dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);

385 kfree(entry);
386 }
388 int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
390 struct ib_device *dev = ucontext->device;
391 size_t length = vma->vm_end - vma->vm_start;
392 struct rdma_user_mmap_entry *rdma_entry;
393 struct qedr_user_mmap_entry *entry;
398 "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
399 vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
401 rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
403 ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
407 entry = get_qedr_mmap_entry(rdma_entry);
409 "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
410 entry->io_address, length, entry->mmap_flag);
412 switch (entry->mmap_flag) {
413 case QEDR_USER_MMAP_IO_WC:
414 pfn = entry->io_address >> PAGE_SHIFT;
415 rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
416 pgprot_writecombine(vma->vm_page_prot),
419 case QEDR_USER_MMAP_PHYS_PAGE:
420 rc = vm_insert_page(vma, vma->vm_start,
421 virt_to_page(entry->address));
429 "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
430 entry->io_address, length, entry->mmap_flag, rc);
432 rdma_user_mmap_entry_put(rdma_entry);

434 return rc;
435 }
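/*
 * qedr_mmap() serves two kinds of entries: QEDR_USER_MMAP_IO_WC maps a
 * write-combined doorbell window from the PCI BAR with rdma_user_mmap_io(),
 * while QEDR_USER_MMAP_PHYS_PAGE maps the kernel page used for doorbell
 * recovery with vm_insert_page().
 */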
436 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
438 struct ib_device *ibdev = ibpd->device;
439 struct qedr_dev *dev = get_qedr_dev(ibdev);
440 struct qedr_pd *pd = get_qedr_pd(ibpd);
444 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
445 udata ? "User Lib" : "Kernel");
447 if (!dev->rdma_ctx) {
448 DP_ERR(dev, "invalid RDMA context\n");
452 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
459 struct qedr_alloc_pd_uresp uresp = {
462 struct qedr_ucontext *context = rdma_udata_to_drv_context(
463 udata, struct qedr_ucontext, ibucontext);
465 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
467 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
468 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
479 int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
481 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
482 struct qedr_pd *pd = get_qedr_pd(ibpd);
484 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
485 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
490 int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
492 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
493 struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
495 return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
498 int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
500 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
501 u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
503 dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
506 static void qedr_free_pbl(struct qedr_dev *dev,
507 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
509 struct pci_dev *pdev = dev->pdev;
512 for (i = 0; i < pbl_info->num_pbls; i++) {
513 if (!pbl[i].va)
514 continue;
515 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
516 pbl[i].va, pbl[i].pa);
517 }

519 kfree(pbl);
520 }
522 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
523 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
525 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
526 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
527 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
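/*
 * Capacity arithmetic: a PBE is a u64, so a 4KB PBL page holds 512 PBEs and
 * a 64KB page holds 8192. A two-layer table therefore addresses up to
 * 8192 * 8192 = 64M pages (MAX_PBES_TWO_LAYER).
 */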
529 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
530 struct qedr_pbl_info *pbl_info,
533 struct pci_dev *pdev = dev->pdev;
534 struct qedr_pbl *pbl_table;
535 dma_addr_t *pbl_main_tbl;
540 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
541 if (!pbl_table)
542 return ERR_PTR(-ENOMEM);
544 for (i = 0; i < pbl_info->num_pbls; i++) {
545 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
546 flags);
547 if (!va)
548 goto err;
550 pbl_table[i].va = va;
551 pbl_table[i].pa = pa;
552 }

554 /* Two-layer PBLs: if we have more than one PBL, we need to initialize
555 * the first one with physical pointers to all of the rest.
556 */
557 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
558 for (i = 0; i < pbl_info->num_pbls - 1; i++)
559 pbl_main_tbl[i] = pbl_table[i + 1].pa;

561 return pbl_table;

563 err:
564 for (i--; i >= 0; i--)
565 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
566 pbl_table[i].va, pbl_table[i].pa);
568 qedr_free_pbl(dev, pbl_info, pbl_table);
570 return ERR_PTR(-ENOMEM);
571 }
573 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
574 struct qedr_pbl_info *pbl_info,
575 u32 num_pbes, int two_layer_capable)
581 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
582 if (num_pbes > MAX_PBES_TWO_LAYER) {
583 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
584 num_pbes);
585 return -EINVAL;
586 }
588 /* calculate required pbl page size */
589 pbl_size = MIN_FW_PBL_PAGE_SIZE;
590 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
591 NUM_PBES_ON_PAGE(pbl_size);
593 while (pbl_capacity < num_pbes) {
594 pbl_size *= 2;
595 pbl_capacity = pbl_size / sizeof(u64);
596 pbl_capacity = pbl_capacity * pbl_capacity;
597 }

599 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
600 num_pbls++; /* One more PBL for layer 0, which points to the others */
601 pbl_info->two_layered = true;
602 } else {
603 /* Single-layer PBL */
604 num_pbls = 1;
605 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
606 roundup_pow_of_two(num_pbes * sizeof(u64)));
607 pbl_info->two_layered = false;
608 }
610 pbl_info->num_pbls = num_pbls;
611 pbl_info->pbl_size = pbl_size;
612 pbl_info->num_pbes = num_pbes;
614 DP_DEBUG(dev, QEDR_MSG_MR,
615 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
616 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

618 return 0;
619 }
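/*
 * Worked example: num_pbes = 1,000,000 with two_layer_capable. A 4KB PBL
 * page gives 512 * 512 = 262,144 addressable PBEs, too few, so pbl_size
 * doubles to 8KB (1024 * 1024 = 1,048,576 >= num_pbes). num_pbls is then
 * DIV_ROUND_UP(1000000, 1024) = 977, plus one for layer 0.
 */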
621 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
622 struct qedr_pbl *pbl,
623 struct qedr_pbl_info *pbl_info, u32 pg_shift)
625 int pbe_cnt, total_num_pbes = 0;
626 struct qedr_pbl *pbl_tbl;
627 struct ib_block_iter biter;
630 if (!pbl_info->num_pbes)
631 return;

633 /* If we have a two-layer PBL, the first PBL points to the rest of
634 * the PBLs and the first data entry lies in the second PBL of the table.
635 */
636 if (pbl_info->two_layered)
637 pbl_tbl = &pbl[1];
638 else
639 pbl_tbl = pbl;

641 pbe = (struct regpair *)pbl_tbl->va;
642 if (!pbe) {
643 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
644 return;
645 }

647 pbe_cnt = 0;
649 rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
650 u64 pg_addr = rdma_block_iter_dma_address(&biter);
652 pbe->lo = cpu_to_le32(pg_addr);
653 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

655 pbe_cnt++;
656 total_num_pbes++;
657 pbe++;

659 if (total_num_pbes == pbl_info->num_pbes)
660 return;

662 /* If the given PBL is full of PBEs, move to the next PBL. */
664 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
665 pbl_tbl++;
666 pbe = (struct regpair *)pbl_tbl->va;
667 pbe_cnt = 0;
668 }
669 }
670 }
672 static int qedr_db_recovery_add(struct qedr_dev *dev,
673 void __iomem *db_addr,
674 void *db_data,
675 enum qed_db_rec_width db_width,
676 enum qed_db_rec_space db_space)
677 {
678 if (!db_data) {
679 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
680 return 0;
681 }

683 return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
684 db_width, db_space);
685 }
687 static void qedr_db_recovery_del(struct qedr_dev *dev,
688 void __iomem *db_addr,
689 void *db_data)
690 {
691 if (!db_data) {
692 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
693 return;
694 }

696 /* Ignore the return code; there is not much we can do about it. The
697 * error log is printed inside.
698 */
699 dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
700 }
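/*
 * Doorbell recovery background: each registered (address, data) pair lets
 * the qed core replay the last doorbell value after a doorbell FIFO
 * overflow. User contexts created by libraries that predate this mechanism
 * pass a NULL db_data, which is why both helpers above silently skip them.
 */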
702 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
703 struct qedr_cq *cq, struct ib_udata *udata,
706 struct qedr_create_cq_uresp uresp;
709 memset(&uresp, 0, sizeof(uresp));
711 uresp.db_offset = db_offset;
712 uresp.icid = cq->icid;
713 if (cq->q.db_mmap_entry)
714 uresp.db_rec_addr =
715 rdma_user_mmap_get_offset(cq->q.db_mmap_entry);

717 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
718 if (rc)
719 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

721 return rc;
722 }
724 static void consume_cqe(struct qedr_cq *cq)
725 {
726 if (cq->latest_cqe == cq->toggle_cqe)
727 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

729 cq->latest_cqe = qed_chain_consume(&cq->pbl);
730 }
732 static inline int qedr_align_cq_entries(int entries)
733 {
734 u64 size, aligned_size;

736 /* We allocate an extra entry that we don't report to the FW. */
737 size = (entries + 1) * QEDR_CQE_SIZE;
738 aligned_size = ALIGN(size, PAGE_SIZE);

740 return aligned_size / QEDR_CQE_SIZE;
741 }
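/*
 * Worked example, assuming QEDR_CQE_SIZE is 32 bytes (sizeof(union
 * rdma_cqe)): entries = 200 -> size = 201 * 32 = 6432 -> aligned up to
 * 8192 -> 256 entries, one of which is the hidden extra slot.
 */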
743 static int qedr_init_user_db_rec(struct ib_udata *udata,
744 struct qedr_dev *dev, struct qedr_userq *q,
745 bool requires_db_rec)
747 struct qedr_ucontext *uctx =
748 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
750 struct qedr_user_mmap_entry *entry;
753 /* Aborting for a non-doorbell userqueue (SRQ) or a non-supporting lib */
754 if (requires_db_rec == 0 || !uctx->db_rec)
755 return 0;
757 /* Allocate a page for doorbell recovery, add to mmap */
758 q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
759 if (!q->db_rec_data) {
760 DP_ERR(dev, "get_zeroed_page failed\n");
761 return -ENOMEM;
762 }

764 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
765 if (!entry)
766 goto err_free_db_data;
768 entry->address = q->db_rec_data;
769 entry->length = PAGE_SIZE;
770 entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
771 rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
772 &entry->rdma_entry, PAGE_SIZE);
773 if (rc) {
774 kfree(entry);
775 goto err_free_db_data;
776 }

777 q->db_mmap_entry = &entry->rdma_entry;

779 return 0;

784 err_free_db_data:
785 free_page((unsigned long)q->db_rec_data);
786 q->db_rec_data = NULL;

788 return -ENOMEM;
789 }
790 static inline int qedr_init_user_queue(struct ib_udata *udata,
791 struct qedr_dev *dev,
792 struct qedr_userq *q, u64 buf_addr,
793 size_t buf_len, bool requires_db_rec,
800 q->buf_addr = buf_addr;
801 q->buf_len = buf_len;
802 q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
803 if (IS_ERR(q->umem)) {
804 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
805 PTR_ERR(q->umem));
806 return PTR_ERR(q->umem);
807 }
809 fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
810 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
811 if (rc)
812 goto err0;
814 if (alloc_and_init) {
815 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
816 if (IS_ERR(q->pbl_tbl)) {
817 rc = PTR_ERR(q->pbl_tbl);
818 goto err0;
819 }
820 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
821 FW_PAGE_SHIFT);
822 } else {
823 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
824 if (!q->pbl_tbl) {
825 rc = -ENOMEM;
826 goto err0;
827 }
828 }
830 /* mmap the user address used to store doorbell data for recovery */
831 return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
833 err0:
834 ib_umem_release(q->umem);
835 q->umem = NULL;

837 return rc;
838 }
840 static inline void qedr_init_cq_params(struct qedr_cq *cq,
841 struct qedr_ucontext *ctx,
842 struct qedr_dev *dev, int vector,
843 int chain_entries, int page_cnt,
845 struct qed_rdma_create_cq_in_params
848 memset(params, 0, sizeof(*params));
849 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
850 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
851 params->cnq_id = vector;
852 params->cq_size = chain_entries - 1;
853 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
854 params->pbl_num_pages = page_cnt;
855 params->pbl_ptr = pbl_ptr;
856 params->pbl_two_level = 0;
859 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
860 {
861 cq->db.data.agg_flags = flags;
862 cq->db.data.value = cpu_to_le32(cons);
863 writeq(cq->db.raw, cq->db_addr);
864 }
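/*
 * The consumer index and the arm flags live in one 64-bit doorbell record
 * (cq->db.raw), so the single writeq() above updates both in one access
 * from the device's point of view.
 */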
866 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
868 struct qedr_cq *cq = get_qedr_cq(ibcq);
869 unsigned long sflags;
870 struct qedr_dev *dev;
872 dev = get_qedr_dev(ibcq->device);

874 if (cq->destroyed) {
875 DP_ERR(dev,
876 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
877 cq, cq->icid);
878 return -EINVAL;
879 }

882 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
883 return 0;
885 spin_lock_irqsave(&cq->cq_lock, sflags);
889 if (flags & IB_CQ_SOLICITED)
890 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
892 if (flags & IB_CQ_NEXT_COMP)
893 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
895 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
897 spin_unlock_irqrestore(&cq->cq_lock, sflags);

899 return 0;
900 }
902 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
903 struct ib_udata *udata)
905 struct ib_device *ibdev = ibcq->device;
906 struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
907 udata, struct qedr_ucontext, ibucontext);
908 struct qed_rdma_destroy_cq_out_params destroy_oparams;
909 struct qed_rdma_destroy_cq_in_params destroy_iparams;
910 struct qed_chain_init_params chain_params = {
911 .mode = QED_CHAIN_MODE_PBL,
912 .intended_use = QED_CHAIN_USE_TO_CONSUME,
913 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
914 .elem_size = sizeof(union rdma_cqe),
916 struct qedr_dev *dev = get_qedr_dev(ibdev);
917 struct qed_rdma_create_cq_in_params params;
918 struct qedr_create_cq_ureq ureq = {};
919 int vector = attr->comp_vector;
920 int entries = attr->cqe;
921 struct qedr_cq *cq = get_qedr_cq(ibcq);
929 DP_DEBUG(dev, QEDR_MSG_INIT,
930 "create_cq: called from %s. entries=%d, vector=%d\n",
931 udata ? "User Lib" : "Kernel", entries, vector);
936 if (entries > QEDR_MAX_CQES) {
937 DP_ERR(dev,
938 "create cq: the number of entries %d is too high. Must be equal to or below %d.\n",
939 entries, QEDR_MAX_CQES);
940 return -EINVAL;
941 }
943 chain_entries = qedr_align_cq_entries(entries);
944 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
945 chain_params.num_elems = chain_entries;
947 /* calc db offset. user will add DPI base, kernel will add db addr */
948 db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
951 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
954 "create cq: problem copying data from user space\n");
960 "create cq: cannot create a cq with 0 entries\n");
964 cq->cq_type = QEDR_CQ_TYPE_USER;
966 rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
967 ureq.len, true, IB_ACCESS_LOCAL_WRITE,
972 pbl_ptr = cq->q.pbl_tbl->pa;
973 page_cnt = cq->q.pbl_info.num_pbes;
975 cq->ibcq.cqe = chain_entries;
976 cq->q.db_addr = ctx->dpi_addr + db_offset;
978 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
980 rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
985 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
986 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
987 cq->ibcq.cqe = cq->pbl.capacity;
990 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
993 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, ¶ms, &icid);
998 cq->sig = QEDR_CQ_MAGIC_NUMBER;
999 spin_lock_init(&cq->cq_lock);
1002 rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
1006 rc = qedr_db_recovery_add(dev, cq->q.db_addr,
1007 &cq->q.db_rec_data->db_data,
1014 /* Generate doorbell address. */
1015 cq->db.data.icid = cq->icid;
1016 cq->db_addr = dev->db_addr + db_offset;
1017 cq->db.data.params = DB_AGG_CMD_MAX <<
1018 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1020 /* point to the very last element, passing it we will toggle */
1021 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1022 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1023 cq->latest_cqe = NULL;
1025 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1027 rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1028 DB_REC_WIDTH_64B, DB_REC_KERNEL);
1033 DP_DEBUG(dev, QEDR_MSG_CQ,
1034 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1035 cq->icid, cq, params.cq_size);
1040 destroy_iparams.icid = cq->icid;
1041 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1045 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1046 ib_umem_release(cq->q.umem);
1047 if (cq->q.db_mmap_entry)
1048 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1050 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1056 #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
1057 #define QEDR_DESTROY_CQ_ITER_DURATION (10)
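/*
 * Wait budget: with both constants at 10, qedr_destroy_cq() below first
 * busy-waits up to 10 * 10us = 100us, then sleeps up to 10 * 10ms = 100ms
 * for outstanding CNQ notifications.
 */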
1059 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1061 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1062 struct qed_rdma_destroy_cq_out_params oparams;
1063 struct qed_rdma_destroy_cq_in_params iparams;
1064 struct qedr_cq *cq = get_qedr_cq(ibcq);
1067 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1071 /* GSI CQs are handled by the driver, so they don't exist in the FW */
1072 if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1073 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1074 return 0;
1075 }
1077 iparams.icid = cq->icid;
1078 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1079 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1081 if (udata) {
1082 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1083 ib_umem_release(cq->q.umem);

1085 if (cq->q.db_rec_data) {
1086 qedr_db_recovery_del(dev, cq->q.db_addr,
1087 &cq->q.db_rec_data->db_data);
1088 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1089 }
1090 } else {
1091 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1092 }
1094 /* We don't want the IRQ handler to handle a non-existing CQ so we
1095 * wait until all CNQ interrupts, if any, are received. This will always
1096 * happen and will always happen very fast. If not, then a serious error
1097 * has occurred. That is why we can use a long delay.
1098 * We spin for a short time so we don't lose time on context switching
1099 * in case all the completions are handled in that span. Otherwise
1100 * we sleep for a while and check again. Since the CNQ may be
1101 * associated with (only) the current CPU we use msleep to allow the
1102 * current CPU to be freed.
1103 * The CNQ notification is increased in qedr_irq_handler().
1105 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1106 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1107 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1108 iter--;
1109 }

1111 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1112 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1113 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1114 iter--;
1115 }
1117 /* Note that we don't need to have explicit code to wait for the
1118 * completion of the event handler because it is invoked from the EQ.
1119 * Since the destroy CQ ramrod has also been received on the EQ we can
1120 * be certain that there's no event handler in process.
1121 */

1123 return 0;
1124 }
1125 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1126 struct ib_qp_attr *attr,
1128 struct qed_rdma_modify_qp_in_params
1131 const struct ib_gid_attr *gid_attr;
1132 enum rdma_network_type nw_type;
1133 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1138 gid_attr = grh->sgid_attr;
1139 ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1143 nw_type = rdma_gid_attr_network_type(gid_attr);
1144 switch (nw_type) {
1145 case RDMA_NETWORK_IPV6:
1146 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1147 sizeof(qp_params->sgid));
1148 memcpy(&qp_params->dgid.bytes[0],
1150 sizeof(qp_params->dgid));
1151 qp_params->roce_mode = ROCE_V2_IPV6;
1152 SET_FIELD(qp_params->modify_flags,
1153 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1154 break;
1155 case RDMA_NETWORK_ROCE_V1:
1156 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1157 sizeof(qp_params->sgid));
1158 memcpy(&qp_params->dgid.bytes[0],
1160 sizeof(qp_params->dgid));
1161 qp_params->roce_mode = ROCE_V1;
1162 break;
1163 case RDMA_NETWORK_IPV4:
1164 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1165 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1166 ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1167 qp_params->sgid.ipv4_addr = ipv4_addr;
1169 qedr_get_ipv4_from_gid(grh->dgid.raw);
1170 qp_params->dgid.ipv4_addr = ipv4_addr;
1171 SET_FIELD(qp_params->modify_flags,
1172 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1173 qp_params->roce_mode = ROCE_V2_IPV4;
1174 break;
1175 default:
1176 return -EINVAL;
1177 }
1179 for (i = 0; i < 4; i++) {
1180 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1181 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1184 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1185 qp_params->vlan_id = 0;

1187 return 0;
1188 }
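/*
 * The dword loop above converts both GIDs from the network byte order used
 * by the IB core into the host order that the qed modify-QP interface
 * expects.
 */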
1190 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1191 struct ib_qp_init_attr *attrs,
1192 struct ib_udata *udata)
1194 struct qedr_device_attr *qattr = &dev->attr;
1196 /* QP0... attrs->qp_type == IB_QPT_GSI */
1197 if (attrs->qp_type != IB_QPT_RC &&
1198 attrs->qp_type != IB_QPT_GSI &&
1199 attrs->qp_type != IB_QPT_XRC_INI &&
1200 attrs->qp_type != IB_QPT_XRC_TGT) {
1201 DP_DEBUG(dev, QEDR_MSG_QP,
1202 "create qp: unsupported qp type=0x%x requested\n",
1207 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1209 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1210 attrs->cap.max_send_wr, qattr->max_sqe);
1214 if (attrs->cap.max_inline_data > qattr->max_inline) {
1216 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1217 attrs->cap.max_inline_data, qattr->max_inline);
1221 if (attrs->cap.max_send_sge > qattr->max_sge) {
1223 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1224 attrs->cap.max_send_sge, qattr->max_sge);
1228 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1230 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1231 attrs->cap.max_recv_sge, qattr->max_sge);
1235 /* verify consumer QPs are not trying to use GSI QP's CQ.
1236 * TGT QP isn't associated with RQ/SQ
1238 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1239 (attrs->qp_type != IB_QPT_XRC_TGT) &&
1240 (attrs->qp_type != IB_QPT_XRC_INI)) {
1241 struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
1242 struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
1244 if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
1245 (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
1247 "create qp: consumer QP cannot use GSI CQs.\n");
1255 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1256 struct qedr_srq *srq, struct ib_udata *udata)
1258 struct qedr_create_srq_uresp uresp = {};
1261 uresp.srq_id = srq->srq_id;
1263 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1265 DP_ERR(dev, "create srq: problem copying data to user space\n");
1270 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1271 struct qedr_create_qp_uresp *uresp,
1274 /* iWARP requires two doorbells per RQ. */
1275 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1276 uresp->rq_db_offset =
1277 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1278 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1279 } else {
1280 uresp->rq_db_offset =
1281 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1282 }
1284 uresp->rq_icid = qp->icid;
1285 if (qp->urq.db_mmap_entry)
1286 uresp->rq_db_rec_addr =
1287 rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1290 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1291 struct qedr_create_qp_uresp *uresp,
1294 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1296 /* iWARP uses the same cid for rq and sq */
1297 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1298 uresp->sq_icid = qp->icid;
1299 else
1300 uresp->sq_icid = qp->icid + 1;
1302 if (qp->usq.db_mmap_entry)
1303 uresp->sq_db_rec_addr =
1304 rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1307 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1308 struct qedr_qp *qp, struct ib_udata *udata,
1309 struct qedr_create_qp_uresp *uresp)
1313 memset(uresp, 0, sizeof(*uresp));
1315 if (qedr_qp_has_sq(qp))
1316 qedr_copy_sq_uresp(dev, uresp, qp);
1318 if (qedr_qp_has_rq(qp))
1319 qedr_copy_rq_uresp(dev, uresp, qp);
1321 uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1322 uresp->qp_id = qp->qp_id;
1324 rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1327 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1333 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1335 qed_chain_reset(&qph->pbl);
1339 qph->db_data.data.value = cpu_to_le16(0);
1342 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1345 struct ib_qp_init_attr *attrs)
1347 spin_lock_init(&qp->q_lock);
1348 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1349 kref_init(&qp->refcnt);
1350 init_completion(&qp->iwarp_cm_comp);
1351 init_completion(&qp->qp_rel_comp);
1355 qp->qp_type = attrs->qp_type;
1356 qp->max_inline_data = attrs->cap.max_inline_data;
1357 qp->state = QED_ROCE_QP_STATE_RESET;
1359 qp->prev_wqe_size = 0;
1361 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1363 if (qedr_qp_has_sq(qp)) {
1364 qedr_reset_qp_hwq_info(&qp->sq);
1365 qp->sq.max_sges = attrs->cap.max_send_sge;
1366 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1367 DP_DEBUG(dev, QEDR_MSG_QP,
1368 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1369 qp->sq.max_sges, qp->sq_cq->icid);
1373 qp->srq = get_qedr_srq(attrs->srq);
1375 if (qedr_qp_has_rq(qp)) {
1376 qedr_reset_qp_hwq_info(&qp->rq);
1377 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1378 qp->rq.max_sges = attrs->cap.max_recv_sge;
1379 DP_DEBUG(dev, QEDR_MSG_QP,
1380 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1381 qp->rq.max_sges, qp->rq_cq->icid);
1384 DP_DEBUG(dev, QEDR_MSG_QP,
1385 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1386 pd->pd_id, qp->qp_type, qp->max_inline_data,
1387 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1388 DP_DEBUG(dev, QEDR_MSG_QP,
1389 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1390 qp->sq.max_sges, qp->sq_cq->icid);
1393 static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1397 if (qedr_qp_has_sq(qp)) {
1398 qp->sq.db = dev->db_addr +
1399 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1400 qp->sq.db_data.data.icid = qp->icid + 1;
1401 rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
1402 DB_REC_WIDTH_32B, DB_REC_KERNEL);
1407 if (qedr_qp_has_rq(qp)) {
1408 qp->rq.db = dev->db_addr +
1409 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1410 qp->rq.db_data.data.icid = qp->icid;
1411 rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
1412 DB_REC_WIDTH_32B, DB_REC_KERNEL);
1413 if (rc && qedr_qp_has_sq(qp))
1414 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
1420 static int qedr_check_srq_params(struct qedr_dev *dev,
1421 struct ib_srq_init_attr *attrs,
1422 struct ib_udata *udata)
1424 struct qedr_device_attr *qattr = &dev->attr;
1426 if (attrs->attr.max_wr > qattr->max_srq_wr) {
1428 "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1429 attrs->attr.max_wr, qattr->max_srq_wr);
1433 if (attrs->attr.max_sge > qattr->max_sge) {
1435 "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1436 attrs->attr.max_sge, qattr->max_sge);
1439 if (!udata && attrs->srq_type == IB_SRQT_XRC) {
1440 DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
1447 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1448 {
1449 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1450 ib_umem_release(srq->usrq.umem);
1451 ib_umem_release(srq->prod_umem);
1452 }
1454 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1456 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1457 struct qedr_dev *dev = srq->dev;
1459 dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1461 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1462 hw_srq->virt_prod_pair_addr,
1463 hw_srq->phy_prod_pair_addr);
1466 static int qedr_init_srq_user_params(struct ib_udata *udata,
1467 struct qedr_srq *srq,
1468 struct qedr_create_srq_ureq *ureq,
1471 struct scatterlist *sg;
1474 rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1475 ureq->srq_len, false, access, 1);
1479 srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1480 sizeof(struct rdma_srq_producers), access);
1481 if (IS_ERR(srq->prod_umem)) {
1482 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1483 ib_umem_release(srq->usrq.umem);
1485 "create srq: failed ib_umem_get for producer, got %ld\n",
1486 PTR_ERR(srq->prod_umem));
1487 return PTR_ERR(srq->prod_umem);
1490 sg = srq->prod_umem->sgt_append.sgt.sgl;
1491 srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1496 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1497 struct qedr_dev *dev,
1498 struct ib_srq_init_attr *init_attr)
1500 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1501 struct qed_chain_init_params params = {
1502 .mode = QED_CHAIN_MODE_PBL,
1503 .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1504 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
1505 .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
1507 dma_addr_t phy_prod_pair_addr;
1512 va = dma_alloc_coherent(&dev->pdev->dev,
1513 sizeof(struct rdma_srq_producers),
1514 &phy_prod_pair_addr, GFP_KERNEL);
1517 "create srq: failed to allocate dma memory for producer\n");
1521 hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1522 hw_srq->virt_prod_pair_addr = va;
1524 num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1525 params.num_elems = num_elems;
1527 rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, ¶ms);
1531 hw_srq->num_elems = num_elems;
1536 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1537 va, phy_prod_pair_addr);
1541 int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1542 struct ib_udata *udata)
1544 struct qed_rdma_destroy_srq_in_params destroy_in_params;
1545 struct qed_rdma_create_srq_in_params in_params = {};
1546 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1547 struct qed_rdma_create_srq_out_params out_params;
1548 struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1549 struct qedr_create_srq_ureq ureq = {};
1550 u64 pbl_base_addr, phy_prod_pair_addr;
1551 struct qedr_srq_hwq_info *hw_srq;
1552 u32 page_cnt, page_size;
1553 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1556 DP_DEBUG(dev, QEDR_MSG_QP,
1557 "create SRQ called from %s (pd %p)\n",
1558 (udata) ? "User lib" : "kernel", pd);
1560 if (init_attr->srq_type != IB_SRQT_BASIC &&
1561 init_attr->srq_type != IB_SRQT_XRC)
1564 rc = qedr_check_srq_params(dev, init_attr, udata);
1569 srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
1570 hw_srq = &srq->hw_srq;
1571 spin_lock_init(&srq->lock);
1573 hw_srq->max_wr = init_attr->attr.max_wr;
1574 hw_srq->max_sges = init_attr->attr.max_sge;
1577 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1580 "create srq: problem copying data from user space\n");
1584 rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1588 page_cnt = srq->usrq.pbl_info.num_pbes;
1589 pbl_base_addr = srq->usrq.pbl_tbl->pa;
1590 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1591 page_size = PAGE_SIZE;
1593 struct qed_chain *pbl;
1595 rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1600 page_cnt = qed_chain_get_page_cnt(pbl);
1601 pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1602 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1603 page_size = QED_CHAIN_PAGE_SIZE;
1606 in_params.pd_id = pd->pd_id;
1607 in_params.pbl_base_addr = pbl_base_addr;
1608 in_params.prod_pair_addr = phy_prod_pair_addr;
1609 in_params.num_pages = page_cnt;
1610 in_params.page_size = page_size;
1612 struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1613 struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1615 in_params.is_xrc = 1;
1616 in_params.xrcd_id = xrcd->xrcd_id;
1617 in_params.cq_cid = cq->icid;
1620 rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1624 srq->srq_id = out_params.srq_id;
1627 rc = qedr_copy_srq_uresp(dev, srq, udata);
1632 rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1636 DP_DEBUG(dev, QEDR_MSG_SRQ,
1637 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1641 destroy_in_params.srq_id = srq->srq_id;
1643 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1646 qedr_free_srq_user_params(srq);
1648 qedr_free_srq_kernel_params(srq);
1653 int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1655 struct qed_rdma_destroy_srq_in_params in_params = {};
1656 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1657 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1659 xa_erase_irq(&dev->srqs, srq->srq_id);
1660 in_params.srq_id = srq->srq_id;
1661 in_params.is_xrc = srq->is_xrc;
1662 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1665 qedr_free_srq_user_params(srq);
1667 qedr_free_srq_kernel_params(srq);
1669 DP_DEBUG(dev, QEDR_MSG_SRQ,
1670 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1675 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1676 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1678 struct qed_rdma_modify_srq_in_params in_params = {};
1679 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1680 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1683 if (attr_mask & IB_SRQ_MAX_WR) {
1685 "modify srq: invalid attribute mask=0x%x specified for %p\n",
1690 if (attr_mask & IB_SRQ_LIMIT) {
1691 if (attr->srq_limit >= srq->hw_srq.max_wr) {
1693 "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1694 attr->srq_limit, srq->hw_srq.max_wr);
1698 in_params.srq_id = srq->srq_id;
1699 in_params.wqe_limit = attr->srq_limit;
1700 rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1705 srq->srq_limit = attr->srq_limit;
1707 DP_DEBUG(dev, QEDR_MSG_SRQ,
1708 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1713 static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
1714 {
1715 switch (ib_qp_type) {
1716 case IB_QPT_RC:
1717 return QED_RDMA_QP_TYPE_RC;
1718 case IB_QPT_XRC_INI:
1719 return QED_RDMA_QP_TYPE_XRC_INI;
1720 case IB_QPT_XRC_TGT:
1721 return QED_RDMA_QP_TYPE_XRC_TGT;
1722 default:
1723 return QED_RDMA_QP_TYPE_INVAL;
1724 }
1725 }
1728 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1731 struct ib_qp_init_attr *attrs,
1732 bool fmr_and_reserved_lkey,
1733 struct qed_rdma_create_qp_in_params *params)
1735 /* QP handle to be written in an async event */
1736 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1737 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1739 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1740 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1741 params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
1742 params->stats_queue = 0;
1745 params->pd = pd->pd_id;
1746 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1749 if (qedr_qp_has_sq(qp))
1750 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1752 if (qedr_qp_has_rq(qp))
1753 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1755 if (qedr_qp_has_srq(qp)) {
1756 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1757 params->srq_id = qp->srq->srq_id;
1758 params->use_srq = true;
1761 params->use_srq = false;
1765 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1767 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1775 qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1776 qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1777 qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1778 qedr_qp_has_sq(qp) ? qp->urq.buf_len : 0);
1782 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1784 struct qed_rdma_create_qp_out_params *out_params)
1786 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1787 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1789 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1790 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1792 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1793 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1796 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1797 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1800 static void qedr_cleanup_user(struct qedr_dev *dev,
1801 struct qedr_ucontext *ctx,
1804 if (qedr_qp_has_sq(qp)) {
1805 ib_umem_release(qp->usq.umem);
1806 qp->usq.umem = NULL;
1809 if (qedr_qp_has_rq(qp)) {
1810 ib_umem_release(qp->urq.umem);
1811 qp->urq.umem = NULL;
1814 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1815 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1816 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1818 kfree(qp->usq.pbl_tbl);
1819 kfree(qp->urq.pbl_tbl);
1822 if (qp->usq.db_rec_data) {
1823 qedr_db_recovery_del(dev, qp->usq.db_addr,
1824 &qp->usq.db_rec_data->db_data);
1825 rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1828 if (qp->urq.db_rec_data) {
1829 qedr_db_recovery_del(dev, qp->urq.db_addr,
1830 &qp->urq.db_rec_data->db_data);
1831 rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1834 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1835 qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1836 &qp->urq.db_rec_db2_data);
1839 static int qedr_create_user_qp(struct qedr_dev *dev,
1842 struct ib_udata *udata,
1843 struct ib_qp_init_attr *attrs)
1845 struct qed_rdma_create_qp_in_params in_params;
1846 struct qed_rdma_create_qp_out_params out_params;
1847 struct qedr_create_qp_uresp uresp = {};
1848 struct qedr_create_qp_ureq ureq = {};
1849 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1850 struct qedr_ucontext *ctx = NULL;
1851 struct qedr_pd *pd = NULL;
1854 qp->create_type = QEDR_QP_CREATE_USER;
1857 pd = get_qedr_pd(ibpd);
1862 rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1865 DP_ERR(dev, "Problem copying data from user space\n");
1870 if (qedr_qp_has_sq(qp)) {
1871 /* SQ - read access only (0) */
1872 rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1873 ureq.sq_len, true, 0, alloc_and_init);
1878 if (qedr_qp_has_rq(qp)) {
1879 /* RQ - read access only (0) */
1880 rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1881 ureq.rq_len, true, 0, alloc_and_init);
1886 memset(&in_params, 0, sizeof(in_params));
1887 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1888 in_params.qp_handle_lo = ureq.qp_handle_lo;
1889 in_params.qp_handle_hi = ureq.qp_handle_hi;
1891 if (qp->qp_type == IB_QPT_XRC_TGT) {
1892 struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1894 in_params.xrcd_id = xrcd->xrcd_id;
1895 in_params.qp_handle_lo = qp->qp_id;
1896 in_params.use_srq = 1;
1899 if (qedr_qp_has_sq(qp)) {
1900 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1901 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1904 if (qedr_qp_has_rq(qp)) {
1905 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1906 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1910 SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1912 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1913 &in_params, &out_params);
1920 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1921 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1923 qp->qp_id = out_params.qp_id;
1924 qp->icid = out_params.icid;
1927 rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1932 /* db offset was calculated in copy_qp_uresp, now set in the user q */
1933 if (qedr_qp_has_sq(qp)) {
1934 qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1935 qp->sq.max_wr = attrs->cap.max_send_wr;
1936 rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1937 &qp->usq.db_rec_data->db_data,
1944 if (qedr_qp_has_rq(qp)) {
1945 qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1946 qp->rq.max_wr = attrs->cap.max_recv_wr;
1947 rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1948 &qp->urq.db_rec_data->db_data,
1955 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1956 qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1958 /* calculate the db_rec_db2 data since it is constant so no
1959 * need to reflect from user
1961 qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1962 qp->urq.db_rec_db2_data.data.value =
1963 cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1965 rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1966 &qp->urq.db_rec_db2_data,
1972 qedr_qp_user_print(dev, qp);
1975 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1977 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1980 qedr_cleanup_user(dev, ctx, qp);
1984 static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1988 qp->sq.db = dev->db_addr +
1989 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1990 qp->sq.db_data.data.icid = qp->icid;
1992 rc = qedr_db_recovery_add(dev, qp->sq.db,
1999 qp->rq.db = dev->db_addr +
2000 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2001 qp->rq.db_data.data.icid = qp->icid;
2002 qp->rq.iwarp_db2 = dev->db_addr +
2003 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2004 qp->rq.iwarp_db2_data.data.icid = qp->icid;
2005 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2007 rc = qedr_db_recovery_add(dev, qp->rq.db,
2014 rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2015 &qp->rq.iwarp_db2_data,
2022 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
2024 struct qed_rdma_create_qp_in_params *in_params,
2025 u32 n_sq_elems, u32 n_rq_elems)
2027 struct qed_rdma_create_qp_out_params out_params;
2028 struct qed_chain_init_params params = {
2029 .mode = QED_CHAIN_MODE_PBL,
2030 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2034 params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2035 params.num_elems = n_sq_elems;
2036 params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2038 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, ¶ms);
2042 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2043 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
2045 params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2046 params.num_elems = n_rq_elems;
2047 params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2049 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, ¶ms);
2053 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2054 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
2056 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2057 in_params, &out_params);
2062 qp->qp_id = out_params.qp_id;
2063 qp->icid = out_params.icid;
2065 return qedr_set_roce_db_info(dev, qp);
2069 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
2071 struct qed_rdma_create_qp_in_params *in_params,
2072 u32 n_sq_elems, u32 n_rq_elems)
2074 struct qed_rdma_create_qp_out_params out_params;
2075 struct qed_chain_init_params params = {
2076 .mode = QED_CHAIN_MODE_PBL,
2077 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2081 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2082 QEDR_SQE_ELEMENT_SIZE,
2083 QED_CHAIN_PAGE_SIZE,
2084 QED_CHAIN_MODE_PBL);
2085 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2086 QEDR_RQE_ELEMENT_SIZE,
2087 QED_CHAIN_PAGE_SIZE,
2088 QED_CHAIN_MODE_PBL);
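/*
 * Unlike the RoCE path, the iWARP path computes the page counts up front
 * and creates the QP first: qed hands back the PBL memory it allocated
 * (sq/rq_pbl_virt/phys), and the chains are then built on top of it via
 * the ext_pbl_* params below.
 */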
2090 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2091 in_params, &out_params);
2096 /* Now we allocate the chain */
2098 params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2099 params.num_elems = n_sq_elems;
2100 params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2101 params.ext_pbl_virt = out_params.sq_pbl_virt;
2102 params.ext_pbl_phys = out_params.sq_pbl_phys;
2104 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, ¶ms);
2108 params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2109 params.num_elems = n_rq_elems;
2110 params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2111 params.ext_pbl_virt = out_params.rq_pbl_virt;
2112 params.ext_pbl_phys = out_params.rq_pbl_phys;
2114 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, ¶ms);
2118 qp->qp_id = out_params.qp_id;
2119 qp->icid = out_params.icid;
2121 return qedr_set_iwarp_db_info(dev, qp);
2124 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2129 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2131 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2132 kfree(qp->wqe_wr_id);
2134 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2135 kfree(qp->rqe_wr_id);
2137 /* The GSI QP is not registered with the db mechanism, so there is nothing to delete */
2138 if (qp->qp_type == IB_QPT_GSI)
2139 return;

2141 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);

2143 if (!qp->srq) {
2144 qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);

2146 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2147 qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2148 &qp->rq.iwarp_db2_data);
2149 }
2150 }
2152 static int qedr_create_kernel_qp(struct qedr_dev *dev,
2155 struct ib_qp_init_attr *attrs)
2157 struct qed_rdma_create_qp_in_params in_params;
2158 struct qedr_pd *pd = get_qedr_pd(ibpd);
2164 memset(&in_params, 0, sizeof(in_params));
2165 qp->create_type = QEDR_QP_CREATE_KERNEL;
2167 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2168 * the ring. The ring should allow at least a single WR, even if the
2169 * user requested none, due to allocation issues.
2170 * We should add an extra WR since the prod and cons indices of
2171 * wqe_wr_id are managed in such a way that the WQ is considered full
2172 * when (prod+1)%max_wr==cons. We currently don't do that because we
2173 * double the number of entries due to an iSER issue that pushes far more
2174 * WRs than indicated. If we decline its ib_post_send() then we get
2175 * error prints in the dmesg we'd like to avoid.
2176 */
2177 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2178 dev->attr.max_sqe);
2180 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2182 if (!qp->wqe_wr_id) {
2183 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2184 return -ENOMEM;
2185 }
2187 /* QP handle to be written in CQE */
2188 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2189 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
2191 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2192 * the ring. The ring should allow at least a single WR, even if the
2193 * user requested none, due to allocation issues.
2194 */
2195 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2197 /* Allocate driver internal RQ array */
2198 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2200 if (!qp->rqe_wr_id) {
2201 DP_ERR(dev,
2202 "create qp: failed RQ shadow memory allocation\n");
2203 kfree(qp->wqe_wr_id);
2204 return -ENOMEM;
2205 }
2207 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2209 n_sq_entries = attrs->cap.max_send_wr;
2210 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2211 n_sq_entries = max_t(u32, n_sq_entries, 1);
2212 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2214 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2216 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2217 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2218 n_sq_elems, n_rq_elems);
2219 else
2220 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2221 n_sq_elems, n_rq_elems);
2222 if (rc)
2223 qedr_cleanup_kernel(dev, qp);

2225 return rc;
2226 }
2228 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2229 struct ib_udata *udata)
2231 struct qedr_ucontext *ctx =
2232 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2236 if (qp->qp_type != IB_QPT_GSI) {
2237 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2238 if (rc)
2239 return rc;
2240 }

2242 if (qp->create_type == QEDR_QP_CREATE_USER)
2243 qedr_cleanup_user(dev, ctx, qp);
2244 else
2245 qedr_cleanup_kernel(dev, qp);

2247 return 0;
2248 }
2250 int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
2251 struct ib_udata *udata)
2253 struct qedr_xrcd *xrcd = NULL;
2254 struct ib_pd *ibpd = ibqp->pd;
2255 struct qedr_pd *pd = get_qedr_pd(ibpd);
2256 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2257 struct qedr_qp *qp = get_qedr_qp(ibqp);
2260 if (attrs->create_flags)
2261 return -EOPNOTSUPP;

2263 if (attrs->qp_type == IB_QPT_XRC_TGT)
2264 xrcd = get_qedr_xrcd(attrs->xrcd);
2265 else
2266 pd = get_qedr_pd(ibpd);
2268 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2269 udata ? "user library" : "kernel", pd);
2271 rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2275 DP_DEBUG(dev, QEDR_MSG_QP,
2276 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2277 udata ? "user library" : "kernel", attrs->event_handler, pd,
2278 get_qedr_cq(attrs->send_cq),
2279 get_qedr_cq(attrs->send_cq)->icid,
2280 get_qedr_cq(attrs->recv_cq),
2281 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2283 qedr_set_common_qp_params(dev, qp, pd, attrs);
2285 if (attrs->qp_type == IB_QPT_GSI)
2286 return qedr_create_gsi_qp(dev, attrs, qp);
2288 if (udata || xrcd)
2289 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2290 else
2291 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);

2293 if (rc)
2294 return rc;
2296 qp->ibqp.qp_num = qp->qp_id;
2298 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2299 rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2300 if (rc)
2301 goto out_free_qp_resources;
2302 }

2304 return 0;
2306 out_free_qp_resources:
2307 qedr_free_qp_resources(dev, qp, udata);
2308 return rc;
2309 }
static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
{
	switch (qp_state) {
	case QED_ROCE_QP_STATE_RESET:
		return IB_QPS_RESET;
	case QED_ROCE_QP_STATE_INIT:
		return IB_QPS_INIT;
	case QED_ROCE_QP_STATE_RTR:
		return IB_QPS_RTR;
	case QED_ROCE_QP_STATE_RTS:
		return IB_QPS_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return IB_QPS_SQD;
	case QED_ROCE_QP_STATE_ERR:
		return IB_QPS_ERR;
	case QED_ROCE_QP_STATE_SQE:
		return IB_QPS_SQE;
	}
	return IB_QPS_ERR;
}
static enum qed_roce_qp_state qedr_get_state_from_ibqp(
					enum ib_qp_state qp_state)
{
	switch (qp_state) {
	case IB_QPS_RESET:
		return QED_ROCE_QP_STATE_RESET;
	case IB_QPS_INIT:
		return QED_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return QED_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return QED_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}
}
static int qedr_update_qp_state(struct qedr_dev *dev,
				struct qedr_qp *qp,
				enum qed_roce_qp_state cur_state,
				enum qed_roce_qp_state new_state)
{
	int status = 0;

	if (new_state == cur_state)
		return 0;

	switch (cur_state) {
	case QED_ROCE_QP_STATE_RESET:
		switch (new_state) {
		case QED_ROCE_QP_STATE_INIT:
			break;
		default:
			status = -EINVAL;
			break;
		}
		break;
	case QED_ROCE_QP_STATE_INIT:
		switch (new_state) {
		case QED_ROCE_QP_STATE_RTR:
			/* Update doorbell (in case post_recv was
			 * done before move to RTR)
			 */
			if (rdma_protocol_roce(&dev->ibdev, 1)) {
				writel(qp->rq.db_data.raw, qp->rq.db);
			}
			break;
		case QED_ROCE_QP_STATE_ERR:
			break;
		default:
			/* Invalid state change. */
			status = -EINVAL;
			break;
		}
		break;
	case QED_ROCE_QP_STATE_RTR:
		/* RTR->XXX */
		switch (new_state) {
		case QED_ROCE_QP_STATE_RTS:
		case QED_ROCE_QP_STATE_ERR:
			break;
		default:
			/* Invalid state change. */
			status = -EINVAL;
			break;
		}
		break;
	case QED_ROCE_QP_STATE_RTS:
		/* RTS->XXX */
		switch (new_state) {
		case QED_ROCE_QP_STATE_SQD:
		case QED_ROCE_QP_STATE_ERR:
			break;
		default:
			/* Invalid state change. */
			status = -EINVAL;
			break;
		}
		break;
	case QED_ROCE_QP_STATE_SQD:
		/* SQD->XXX */
		switch (new_state) {
		case QED_ROCE_QP_STATE_RTS:
		case QED_ROCE_QP_STATE_ERR:
			break;
		default:
			/* Invalid state change. */
			status = -EINVAL;
			break;
		}
		break;
	case QED_ROCE_QP_STATE_ERR:
		/* ERR->XXX */
		switch (new_state) {
		case QED_ROCE_QP_STATE_RESET:
			if ((qp->rq.prod != qp->rq.cons) ||
			    (qp->sq.prod != qp->sq.cons)) {
				DP_NOTICE(dev,
					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
					  qp->sq.cons);
				status = -EINVAL;
			}
			break;
		default:
			status = -EINVAL;
			break;
		}
		break;
	default:
		status = -EINVAL;
		break;
	}

	return status;
}
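/* Summary of the RoCE QP transitions accepted above:
 *   RESET -> INIT
 *   INIT  -> RTR (the RQ doorbell is rung here) or ERR
 *   RTR   -> RTS or ERR
 *   RTS   -> SQD or ERR
 *   SQD   -> RTS or ERR
 *   ERR   -> RESET (only once both SQ and RQ are empty)
 * Any other transition is rejected with -EINVAL.
 */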
int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_udata *udata)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
	struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	enum ib_qp_state old_qp_state, new_qp_state;
	enum qed_roce_qp_state cur_state;
	int rc = 0;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
		 attr->qp_state);

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	old_qp_state = qedr_get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qp_state = attr->qp_state;
	else
		new_qp_state = old_qp_state;

	if (rdma_protocol_roce(&dev->ibdev, 1)) {
		if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
					ibqp->qp_type, attr_mask)) {
			DP_ERR(dev,
			       "modify qp: invalid attribute mask=0x%x specified for\n"
			       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
			       attr_mask, qp->qp_id, ibqp->qp_type,
			       old_qp_state, new_qp_state);
			rc = -EINVAL;
			goto err;
		}
	}

	/* Translate the masks... */
	if (attr_mask & IB_QP_STATE) {
		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
		qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
	}

	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp_params.sqd_async = true;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
		if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
			rc = -EINVAL;
			goto err;
		}

		qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
		qp_params.incoming_rdma_read_en = attr->qp_access_flags &
						  IB_ACCESS_REMOTE_READ;
		qp_params.incoming_rdma_write_en = attr->qp_access_flags &
						   IB_ACCESS_REMOTE_WRITE;
		qp_params.incoming_atomic_en = attr->qp_access_flags &
					       IB_ACCESS_REMOTE_ATOMIC;
	}

	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			return -EINVAL;

		if (attr_mask & IB_QP_PATH_MTU) {
			if (attr->path_mtu < IB_MTU_256 ||
			    attr->path_mtu > IB_MTU_4096) {
				pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
				rc = -EINVAL;
				goto err;
			}
			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
				      ib_mtu_enum_to_int(iboe_get_mtu
							 (dev->ndev->mtu)));
		}

		if (!qp->mtu) {
			qp->mtu =
			ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
			pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
		}

		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);

		qp_params.traffic_class_tos = grh->traffic_class;
		qp_params.flow_label = grh->flow_label;
		qp_params.hop_limit_ttl = grh->hop_limit;

		qp->sgid_idx = grh->sgid_index;

		rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
		if (rc) {
			DP_ERR(dev,
			       "modify qp: problems with GID index %d (rc=%d)\n",
			       grh->sgid_index, rc);
			return rc;
		}

		rc = qedr_get_dmac(dev, &attr->ah_attr,
				   qp_params.remote_mac_addr);
		if (rc)
			return rc;

		qp_params.use_local_mac = true;
		ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);

		DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
			 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
			 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
		DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
			 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
			 qp_params.remote_mac_addr);

		qp_params.mtu = qp->mtu;
		qp_params.lb_indication = false;
	}

	if (!qp_params.mtu) {
		/* Stay with current MTU */
		if (qp->mtu)
			qp_params.mtu = qp->mtu;
		else
			qp_params.mtu =
			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);

		/* The received timeout value is an exponent used like this:
		 *    "12.7.34 LOCAL ACK TIMEOUT
		 *    Value representing the transport (ACK) timeout for use by
		 *    the remote, expressed as: 4.096 * 2^timeout [usec]"
		 * The FW expects timeout in msec so we need to divide the usec
		 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
		 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
		 * The value of zero means infinite so we use a 'max_t' to make
		 * sure that sub 1 msec values will be configured as 1 msec.
		 */
		if (attr->timeout)
			qp_params.ack_timeout =
					1 << max_t(int, attr->timeout - 8, 0);
		else
			qp_params.ack_timeout = 0;

		qp->timeout = attr->timeout;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
		qp_params.retry_cnt = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
		qp_params.rnr_retry_cnt = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_RQ_PSN) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
		qp_params.rq_psn = attr->rq_psn;
		qp->rq_psn = attr->rq_psn;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
			rc = -EINVAL;
			DP_ERR(dev,
			       "unsupported max_rd_atomic=%d, supported=%d\n",
			       attr->max_rd_atomic,
			       dev->attr.max_qp_req_rd_atomic_resc);
			goto err;
		}

		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
		qp_params.sq_psn = attr->sq_psn;
		qp->sq_psn = attr->sq_psn;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic >
		    dev->attr.max_qp_resp_rd_atomic_resc) {
			DP_ERR(dev,
			       "unsupported max_dest_rd_atomic=%d, supported=%d\n",
			       attr->max_dest_rd_atomic,
			       dev->attr.max_qp_resp_rd_atomic_resc);
			rc = -EINVAL;
			goto err;
		}

		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);

		qp_params.dest_qp = attr->dest_qp_num;
		qp->dest_qp_num = attr->dest_qp_num;
	}

	cur_state = qp->state;

	/* Update the QP state before the actual ramrod to prevent a race with
	 * fast path. Modifying the QP state to error will cause the device to
	 * flush the CQEs, and polling the flushed CQEs would be considered a
	 * problem if the QP isn't in the error state.
	 */
	if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
	    !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
		qp->state = QED_ROCE_QP_STATE_ERR;

	if (qp->qp_type != IB_QPT_GSI)
		rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
					      qp->qed_qp, &qp_params);

	if (attr_mask & IB_QP_STATE) {
		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
			rc = qedr_update_qp_state(dev, qp, cur_state,
						  qp_params.new_state);
		qp->state = qp_params.new_state;
	}

err:
	return rc;
}
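/* Worked example for the IB_QP_TIMEOUT conversion above: attr->timeout = 14
 * means 4.096 usec * 2^14 ~= 67 msec on the wire; the 2^(timeout - 8)
 * approximation configures the FW with 1 << 6 = 64 msec. For attr->timeout
 * <= 8 the max_t() clamp yields 1 msec, and attr->timeout = 0 (infinite) is
 * passed through as 0.
 */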
static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
{
	int ib_qp_acc_flags = 0;

	if (params->incoming_rdma_write_en)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (params->incoming_rdma_read_en)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
	if (params->incoming_atomic_en)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
	ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}
int qedr_query_qp(struct ib_qp *ibqp,
		  struct ib_qp_attr *qp_attr,
		  int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct qed_rdma_query_qp_out_params params;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	int rc = 0;

	memset(&params, 0, sizeof(params));
	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	if (qp->qp_type != IB_QPT_GSI) {
		rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
		if (rc)
			goto err;
		qp_attr->qp_state = qedr_get_ibqp_state(params.state);
	} else {
		qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
	}

	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
	qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.rq_psn;
	qp_attr->sq_psn = params.sq_psn;
	qp_attr->dest_qp_num = params.dest_qp;

	qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);

	qp_attr->cap.max_send_wr = qp->sq.max_wr;
	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = dev->attr.max_inline;
	qp_init_attr->cap = qp_attr->cap;

	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
			params.flow_label, qp->sgid_idx,
			params.hop_limit_ttl, params.traffic_class_tos);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
	rdma_ah_set_sl(&qp_attr->ah_attr, 0);
	qp_attr->timeout = qp->timeout;
	qp_attr->rnr_retry = params.rnr_retry;
	qp_attr->retry_cnt = params.retry_cnt;
	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
	qp_attr->pkey_index = params.pkey_index;
	qp_attr->port_num = 1;
	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));

	qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
	qp_attr->max_rd_atomic = params.max_rd_atomic;
	qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;

	DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
		 qp_attr->cap.max_inline_data);

err:
	return rc;
}
int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	struct ib_qp_attr attr;
	int attr_mask = 0;

	DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
		 qp, qp->qp_type);

	if (rdma_protocol_roce(&dev->ibdev, 1)) {
		if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
		    (qp->state != QED_ROCE_QP_STATE_INIT)) {

			attr.qp_state = IB_QPS_ERR;
			attr_mask |= IB_QP_STATE;

			/* Change the QP state to ERROR */
			qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
		}
	} else {
		/* If connection establishment started the WAIT_FOR_CONNECT
		 * bit will be on and we need to wait for the establishment
		 * to complete before destroying the qp.
		 */
		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
				     &qp->iwarp_cm_flags))
			wait_for_completion(&qp->iwarp_cm_comp);

		/* If graceful disconnect started, the WAIT_FOR_DISCONNECT
		 * bit will be on, and we need to wait for the disconnect to
		 * complete before continuing. We can use the same completion,
		 * iwarp_cm_comp, since this is the only place that waits for
		 * this completion and it is sequential. In addition,
		 * disconnect can't occur before the connection is fully
		 * established, therefore if WAIT_FOR_DISCONNECT is on it
		 * means WAIT_FOR_CONNECT is also on and the completion for
		 * CONNECT already occurred.
		 */
		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
				     &qp->iwarp_cm_flags))
			wait_for_completion(&qp->iwarp_cm_comp);
	}

	if (qp->qp_type == IB_QPT_GSI)
		qedr_destroy_gsi_qp(dev);

	/* We need to remove the entry from the xarray before we release the
	 * qp_id to avoid a race of the qp_id being reallocated and failing
	 * on xa_insert
	 */
	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		xa_erase(&dev->qps, qp->qp_id);

	qedr_free_qp_resources(dev, qp, udata);

	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		qedr_iw_qp_rem_ref(&qp->ibqp);
		wait_for_completion(&qp->qp_rel_comp);
	}

	return 0;
}
int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
		   struct ib_udata *udata)
{
	struct qedr_ah *ah = get_qedr_ah(ibah);

	rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);

	return 0;
}

int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct qedr_ah *ah = get_qedr_ah(ibah);

	rdma_destroy_ah_attr(&ah->attr);
	return 0;
}
static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
{
	struct qedr_pbl *pbl, *tmp;

	if (info->pbl_table)
		list_add_tail(&info->pbl_table->list_entry,
			      &info->free_pbl_list);

	if (!list_empty(&info->inuse_pbl_list))
		list_splice(&info->inuse_pbl_list, &info->free_pbl_list);

	list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
		list_del(&pbl->list_entry);
		qedr_free_pbl(dev, &info->pbl_info, pbl);
	}
}
static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
			size_t page_list_len, bool two_layered)
{
	struct qedr_pbl *tmp;
	int rc;

	INIT_LIST_HEAD(&info->free_pbl_list);
	INIT_LIST_HEAD(&info->inuse_pbl_list);

	rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
				  page_list_len, two_layered);
	if (rc)
		goto done;

	info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
	if (IS_ERR(info->pbl_table)) {
		rc = PTR_ERR(info->pbl_table);
		goto done;
	}

	DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
		 &info->pbl_table->pa);

	/* In the usual case we use 2 PBLs, so add one to the free
	 * list and allocate another one.
	 */
	tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
	if (IS_ERR(tmp)) {
		DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
		goto done;
	}

	list_add_tail(&tmp->list_entry, &info->free_pbl_list);

	DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);

done:
	if (rc)
		free_mr_info(dev, info);

	return rc;
}
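/* PBL pooling note: init_mr_info() keeps one table in info->pbl_table and
 * parks a spare on free_pbl_list because a fast-reg MR typically alternates
 * between two page lists (one in use by HW, one pending invalidation); see
 * handle_completed_mrs() below, which recycles inuse_pbl_list entries back
 * to the free list as FMR completions arrive.
 */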
struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
			       u64 usr_addr, int acc, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_mr *mr;
	struct qedr_pd *pd;
	int rc = -ENOMEM;

	pd = get_qedr_pd(ibpd);
	DP_DEBUG(dev, QEDR_MSG_MR,
		 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
		 pd->pd_id, start, len, usr_addr, acc);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(rc);

	mr->type = QEDR_MR_USER;

	mr->umem = ib_umem_get(ibpd->device, start, len, acc);
	if (IS_ERR(mr->umem)) {
		rc = -EFAULT;
		goto err0;
	}

	rc = init_mr_info(dev, &mr->info,
			  ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
	if (rc)
		goto err1;

	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
			   &mr->info.pbl_info, PAGE_SHIFT);

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
		if (rc == -EINVAL)
			DP_ERR(dev, "Out of MR resources\n");
		else
			DP_ERR(dev, "roce alloc tid returned error %d\n", rc);

		goto err1;
	}

	/* Index only, 18 bit long, lkey = itid << 8 | key */
	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
	mr->hw_mr.key = 0;
	mr->hw_mr.pd = pd->pd_id;
	mr->hw_mr.local_read = 1;
	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hw_mr.mw_bind = false;
	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
	mr->hw_mr.page_size_log = PAGE_SHIFT;
	mr->hw_mr.length = len;
	mr->hw_mr.vaddr = usr_addr;
	mr->hw_mr.phy_mr = false;
	mr->hw_mr.dma_mr = false;

	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
	if (rc) {
		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
		goto err2;
	}

	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
	    mr->hw_mr.remote_atomic)
		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;

	DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
		 mr->ibmr.lkey);
	return &mr->ibmr;

err2:
	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err1:
	qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
err0:
	ib_umem_release(mr->umem);
	kfree(mr);
	return ERR_PTR(rc);
}
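/* Key layout example: lkey/rkey are built as (itid << 8 | key), i.e. the
 * 18-bit TID in the upper bits with an 8-bit consumer key below it. With
 * itid = 0x1234 and key = 0 the resulting lkey is 0x123400; the rkey is only
 * populated when a remote access flag was requested.
 */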
int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct qedr_mr *mr = get_qedr_mr(ib_mr);
	struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
	int rc = 0;

	rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
	if (rc)
		return rc;

	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);

	if (mr->type != QEDR_MR_DMA)
		free_mr_info(dev, &mr->info);

	/* it could be user registered memory. */
	ib_umem_release(mr->umem);

	kfree(mr);

	return rc;
}
static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
				       int max_page_list_len)
{
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_mr *mr;
	int rc = -ENOMEM;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
		 max_page_list_len);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(rc);

	mr->dev = dev;
	mr->type = QEDR_MR_FRMR;

	rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
	if (rc)
		goto err0;

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
		if (rc == -EINVAL)
			DP_ERR(dev, "Out of MR resources\n");
		else
			DP_ERR(dev, "roce alloc tid returned error %d\n", rc);

		goto err0;
	}

	/* Index only, 18 bit long, lkey = itid << 8 | key */
	mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
	mr->hw_mr.key = 0;
	mr->hw_mr.pd = pd->pd_id;
	mr->hw_mr.local_read = 1;
	mr->hw_mr.local_write = 0;
	mr->hw_mr.remote_read = 0;
	mr->hw_mr.remote_write = 0;
	mr->hw_mr.remote_atomic = 0;
	mr->hw_mr.mw_bind = false;
	mr->hw_mr.pbl_ptr = 0;
	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
	mr->hw_mr.length = 0;
	mr->hw_mr.vaddr = 0;
	mr->hw_mr.phy_mr = true;
	mr->hw_mr.dma_mr = false;

	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
	if (rc) {
		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
		goto err1;
	}

	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
	mr->ibmr.rkey = mr->ibmr.lkey;

	DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
	return mr;

err1:
	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err0:
	kfree(mr);
	return ERR_PTR(rc);
}
struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct qedr_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __qedr_alloc_mr(ibpd, max_num_sg);

	if (IS_ERR(mr))
		return ERR_PTR(-EINVAL);

	return &mr->ibmr;
}
static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct qedr_mr *mr = get_qedr_mr(ibmr);
	struct qedr_pbl *pbl_table;
	struct regpair *pbe;
	u32 pbes_in_page;

	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
		return -ENOMEM;
	}

	DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
		 mr->npages, addr);

	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
	pbe = (struct regpair *)pbl_table->va;
	pbe += mr->npages % pbes_in_page;
	pbe->lo = cpu_to_le32((u32)addr);
	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));

	mr->npages++;

	return 0;
}
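/* Indexing example for qedr_set_page(): with pbl_size = 4096 each table
 * holds 4096 / 8 = 512 PBEs, so page number 1000 lands in pbl_table[1],
 * entry 1000 % 512 = 488. The 64-bit DMA address is stored split into
 * little-endian lo/hi halves as the HW expects.
 */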
static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
{
	int work = info->completed - info->completed_handled - 1;

	DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
		struct qedr_pbl *pbl;

		/* Free all the page lists that are possible to be freed
		 * (all the ones that were invalidated), under the assumption
		 * that if an FMR was completed successfully then any
		 * invalidate operation posted before it has also ended.
		 */
		pbl = list_first_entry(&info->inuse_pbl_list,
				       struct qedr_pbl, list_entry);
		list_move_tail(&pbl->list_entry, &info->free_pbl_list);
		info->completed_handled++;
	}
}
int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset)
{
	struct qedr_mr *mr = get_qedr_mr(ibmr);

	mr->npages = 0;

	handle_completed_mrs(mr->dev, &mr->info);
	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
}
struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct qedr_mr *mr;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = QEDR_MR_DMA;

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
		if (rc == -EINVAL)
			DP_ERR(dev, "Out of MR resources\n");
		else
			DP_ERR(dev, "roce alloc tid returned error %d\n", rc);

		goto err1;
	}

	/* index only, 18 bit long, lkey = itid << 8 | key */
	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
	mr->hw_mr.pd = pd->pd_id;
	mr->hw_mr.local_read = 1;
	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hw_mr.dma_mr = true;

	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
	if (rc) {
		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
		goto err2;
	}

	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
	    mr->hw_mr.remote_atomic)
		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;

	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
	return &mr->ibmr;

err2:
	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err1:
	kfree(mr);
	return ERR_PTR(rc);
}
static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
{
	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
}

static int sge_data_len(struct ib_sge *sg_list, int num_sge)
{
	int i, len = 0;

	for (i = 0; i < num_sge; i++)
		len += sg_list[i].length;

	return len;
}

static void swap_wqe_data64(u64 *p)
{
	int i;

	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
		*p = cpu_to_be64(cpu_to_le64(*p));
}
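/* swap_wqe_data64() relies on cpu_to_be64(cpu_to_le64(x)) collapsing to a
 * single unconditional byte-swap: on little-endian hosts cpu_to_le64 is a
 * no-op and cpu_to_be64 swaps; on big-endian hosts the roles are reversed.
 * Either way the inline WQE payload ends up byte-swapped per 64-bit word,
 * which is the layout the FW expects for inline data segments.
 */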
static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
				       struct qedr_qp *qp, u8 *wqe_size,
				       const struct ib_send_wr *wr,
				       const struct ib_send_wr **bad_wr,
				       u8 *bits, u8 bit)
{
	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
	char *seg_prt, *wqe;
	int i, seg_siz;

	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
		*bad_wr = wr;
		return 0;
	}

	if (!data_size)
		return data_size;

	*bits |= bit;

	seg_prt = NULL;
	wqe = NULL;
	seg_siz = 0;

	/* Copy data inline */
	for (i = 0; i < wr->num_sge; i++) {
		u32 len = wr->sg_list[i].length;
		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;

		while (len > 0) {
			u32 cur;

			/* New segment required */
			if (!seg_siz) {
				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
				seg_prt = wqe;
				seg_siz = sizeof(struct rdma_sq_common_wqe);
				(*wqe_size)++;
			}

			/* Calculate currently allowed length */
			cur = min_t(u32, len, seg_siz);
			memcpy(seg_prt, src, cur);

			/* Update segment variables */
			seg_prt += cur;
			seg_siz -= cur;

			/* Update sge variables */
			src += cur;
			len -= cur;

			/* Swap fully-completed segments */
			if (!seg_siz)
				swap_wqe_data64((u64 *)wqe);
		}
	}

	/* swap last not completed segment */
	if (seg_siz)
		swap_wqe_data64((u64 *)wqe);

	return data_size;
}
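/* Inline copy example (assuming the usual 16-byte chain element, i.e.
 * sizeof(struct rdma_sq_common_wqe) == 16): a 24-byte payload consumes two
 * produced elements, 16 bytes into the first and the remaining 8 into the
 * second; the first element is byte-swapped as soon as it fills, while the
 * partial tail is swapped after the loop.
 */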
#define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->flags = cpu_to_le32(vflags);		\
	} while (0)

#define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
	do {							\
		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
		(hdr)->num_sges = num_sge;			\
	} while (0)

#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->l_key = cpu_to_le32(vlkey);		\
	} while (0)
static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
				const struct ib_send_wr *wr)
{
	u32 data_size = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);

		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
		sge->length = cpu_to_le32(wr->sg_list[i].length);
		data_size += wr->sg_list[i].length;
	}

	if (wqe_size)
		*wqe_size += wr->num_sge;

	return data_size;
}
static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_rdma_wqe_1st *rwqe,
				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
				     const struct ib_send_wr *wr,
				     const struct ib_send_wr **bad_wr)
{
	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);

	if (wr->send_flags & IB_SEND_INLINE &&
	    (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
	     wr->opcode == IB_WR_RDMA_WRITE)) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
						   bad_wr, &rwqe->flags, flags);
	}

	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
}
static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_send_wqe_1st *swqe,
				     struct rdma_sq_send_wqe_2st *swqe2,
				     const struct ib_send_wr *wr,
				     const struct ib_send_wr **bad_wr)
{
	memset(swqe2, 0, sizeof(*swqe2));
	if (wr->send_flags & IB_SEND_INLINE) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
						   bad_wr, &swqe->flags, flags);
	}

	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
}
static int qedr_prepare_reg(struct qedr_qp *qp,
			    struct rdma_sq_fmr_wqe_1st *fwqe1,
			    const struct ib_reg_wr *wr)
{
	struct qedr_mr *mr = get_qedr_mr(wr->mr);
	struct rdma_sq_fmr_wqe_2nd *fwqe2;

	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
	fwqe1->l_key = wr->key;

	fwqe2->access_ctrl = 0;

	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
		   !!(wr->access & IB_ACCESS_REMOTE_READ));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
	fwqe2->fmr_ctrl = 0;

	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
		   ilog2(mr->ibmr.page_size) - 12);

	fwqe2->length_hi = 0;
	fwqe2->length_lo = mr->ibmr.length;
	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);

	qp->wqe_wr_id[qp->sq.prod].mr = mr;

	return 0;
}
static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_INV:
		return IB_WC_SEND;
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_WC_FETCH_ADD;
	case IB_WR_REG_MR:
		return IB_WC_REG_MR;
	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	default:
		return IB_WC_SEND;
	}
}
static inline bool qedr_can_post_send(struct qedr_qp *qp,
				      const struct ib_send_wr *wr)
{
	int wq_is_full, err_wr, pbl_is_full;
	struct qedr_dev *dev = qp->dev;

	/* prevent SQ overflow and/or processing of a bad WR */
	err_wr = wr->num_sge > qp->sq.max_sges;
	wq_is_full = qedr_wq_is_full(&qp->sq);
	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	if (wq_is_full || err_wr || pbl_is_full) {
		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
			DP_ERR(dev,
			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
		}

		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
			DP_ERR(dev,
			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
		}

		if (pbl_is_full &&
		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
			DP_ERR(dev,
			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
		}
		return false;
	}
	return true;
}
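/* Ring-full example for qedr_wq_is_full(): with max_wr = 4, prod = 3 and
 * cons = 0, (3 + 1) % 4 == 0 == cons, so the WQ is reported full while one
 * slot is still empty; the shadow array deliberately sacrifices one entry
 * to distinguish full from empty.
 */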
static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			    const struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct rdma_sq_atomic_wqe_1st *awqe1;
	struct rdma_sq_atomic_wqe_2nd *awqe2;
	struct rdma_sq_atomic_wqe_3rd *awqe3;
	struct rdma_sq_send_wqe_2st *swqe2;
	struct rdma_sq_local_inv_wqe *iwqe;
	struct rdma_sq_rdma_wqe_2nd *rwqe2;
	struct rdma_sq_send_wqe_1st *swqe;
	struct rdma_sq_rdma_wqe_1st *rwqe;
	struct rdma_sq_fmr_wqe_1st *fwqe1;
	struct rdma_sq_common_wqe *wqe;
	u32 length;
	int rc = 0;
	bool comp;

	if (!qedr_can_post_send(qp, wr)) {
		*bad_wr = wr;
		return -ENOMEM;
	}

	wqe = qed_chain_produce(&qp->sq.pbl);
	qp->wqe_wr_id[qp->sq.prod].signaled =
		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;

	wqe->flags = 0;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
		   !!(wr->send_flags & IB_SEND_SOLICITED));
	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
		   !!(wr->send_flags & IB_SEND_FENCE));
	wqe->prev_wqe_size = qp->prev_wqe_size;

	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);

	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);

		swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;

		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		swqe->wqe_size = 2;
		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_WRITE:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_READ_WITH_INV:
		SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
		fallthrough;	/* same as RDMA READ */

	case IB_WR_RDMA_READ:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
		awqe1->wqe_size = 4;

		awqe2 = qed_chain_produce(&qp->sq.pbl);
		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);

		awqe3 = qed_chain_produce(&qp->sq.pbl);

		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->compare_add);
		} else {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->swap);
			DMA_REGPAIR_LE(awqe3->cmp_data,
				       atomic_wr(wr)->compare_add);
		}

		qedr_prepare_sq_sges(qp, NULL, wr);

		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
		qp->prev_wqe_size = awqe1->wqe_size;
		break;

	case IB_WR_LOCAL_INV:
		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
		iwqe->wqe_size = 1;

		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
		iwqe->inv_l_key = wr->ex.invalidate_rkey;
		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
		qp->prev_wqe_size = iwqe->wqe_size;
		break;
	case IB_WR_REG_MR:
		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
		fwqe1->wqe_size = 2;

		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
		if (rc) {
			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
			*bad_wr = wr;
			break;
		}

		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
		qp->prev_wqe_size = fwqe1->wqe_size;
		break;
	default:
		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
		rc = -EINVAL;
		*bad_wr = wr;
		break;
	}

	if (*bad_wr) {
		u16 value;

		/* Restore prod to its position before
		 * this WR was processed
		 */
		value = le16_to_cpu(qp->sq.db_data.data.value);
		qed_chain_set_prod(&qp->sq.pbl, value, wqe);

		/* Restore prev_wqe_size */
		qp->prev_wqe_size = wqe->prev_wqe_size;
		rc = -EINVAL;
		DP_ERR(dev, "POST SEND FAILED\n");
	}

	return rc;
}
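/* WQE budget summary for __qedr_post_send() (descriptive note): SEND/RDMA
 * opcodes span two chain elements (first + second WQE) plus one element per
 * SGE, atomics always occupy four elements, and LOCAL_INV fits in one; this
 * is why qedr_can_post_send() requires QEDR_MAX_SQE_ELEMENTS_PER_SQE free
 * chain elements before any opcode is accepted.
 */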
int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		   const struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	*bad_wr = NULL;

	if (qp->qp_type == IB_QPT_GSI)
		return qedr_gsi_post_send(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->q_lock, flags);

	if (rdma_protocol_roce(&dev->ibdev, 1)) {
		if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
		    (qp->state != QED_ROCE_QP_STATE_SQD)) {
			spin_unlock_irqrestore(&qp->q_lock, flags);
			*bad_wr = wr;
			DP_DEBUG(dev, QEDR_MSG_CQ,
				 "QP in wrong state! QP icid=0x%x state %d\n",
				 qp->icid, qp->state);
			return -EINVAL;
		}
	}

	while (wr) {
		rc = __qedr_post_send(ibqp, wr, bad_wr);
		if (rc)
			break;

		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->sq);

		qp->sq.db_data.data.value++;

		wr = wr->next;
	}

	/* Trigger doorbell.
	 * If there was a failure in the first WR then it will be triggered in
	 * vain. However this is not harmful (as long as the producer value is
	 * unchanged). For performance reasons we avoid checking for this
	 * redundant doorbell.
	 *
	 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
	 * soon as we give the doorbell, we could get a completion
	 * for this wr, therefore we need to make sure that the
	 * memory is updated before giving the doorbell.
	 * During qedr_poll_cq, rmb is called before accessing the
	 * cqe. This covers for the smp_rmb as well.
	 */
	smp_wmb();
	writel(qp->sq.db_data.raw, qp->sq.db);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return rc;
}
static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
{
	u32 used;

	/* Calculate number of elements used based on producer
	 * count and consumer count and subtract it from max
	 * work request supported so that we get elements left.
	 */
	used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);

	return hw_srq->max_wr - used;
}
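/* qedr_srq_elem_left() example: with max_wr = 128, wr_prod_cnt = 130 and
 * wr_cons_cnt = 10, used = 120 and 8 WRs may still be posted. The unsigned
 * subtraction stays correct across the u32 wrap of the producer/consumer
 * counters.
 */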
int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr)
{
	struct qedr_srq *srq = get_qedr_srq(ibsrq);
	struct qedr_srq_hwq_info *hw_srq;
	struct qedr_dev *dev = srq->dev;
	struct qed_chain *pbl;
	unsigned long flags;
	int status = 0;
	u32 num_sge;

	spin_lock_irqsave(&srq->lock, flags);

	hw_srq = &srq->hw_srq;
	pbl = &srq->hw_srq.pbl;
	while (wr) {
		struct rdma_srq_wqe_header *hdr;
		int i;

		if (!qedr_srq_elem_left(hw_srq) ||
		    wr->num_sge > srq->hw_srq.max_sges) {
			DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
			       hw_srq->wr_prod_cnt,
			       atomic_read(&hw_srq->wr_cons_cnt),
			       wr->num_sge, srq->hw_srq.max_sges);
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		hdr = qed_chain_produce(pbl);
		num_sge = wr->num_sge;
		/* Set number of sge and work request id in header */
		SRQ_HDR_SET(hdr, wr->wr_id, num_sge);

		srq->hw_srq.wr_prod_cnt++;
		hw_srq->wqe_prod++;
		hw_srq->sge_prod++;

		DP_DEBUG(dev, QEDR_MSG_SRQ,
			 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
			 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);

		for (i = 0; i < wr->num_sge; i++) {
			struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);

			/* Set SGE length, lkey and address */
			SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
				    wr->sg_list[i].length, wr->sg_list[i].lkey);

			DP_DEBUG(dev, QEDR_MSG_SRQ,
				 "[%d]: len %d key %x addr %x:%x\n",
				 i, srq_sge->length, srq_sge->l_key,
				 srq_sge->addr.hi, srq_sge->addr.lo);
			hw_srq->sge_prod++;
		}

		/* Update WQE and SGE information before
		 * updating producer.
		 */
		dma_wmb();

		/* SRQ producer is 8 bytes. Need to update SGE producer index
		 * in first 4 bytes and need to update WQE producer in
		 * next 4 bytes.
		 */
		srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
		/* Make sure sge producer is updated first */
		dma_wmb();
		srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);

		wr = wr->next;
	}

	DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
		 qed_chain_get_elem_left(pbl));
	spin_unlock_irqrestore(&srq->lock, flags);

	return status;
}
int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		   const struct ib_recv_wr **bad_wr)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int status = 0;

	if (qp->qp_type == IB_QPT_GSI)
		return qedr_gsi_post_recv(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->q_lock, flags);

	while (wr) {
		int i;

		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
		    wr->num_sge > qp->rq.max_sges) {
			DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
			       qp->rq.max_sges);
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		for (i = 0; i < wr->num_sge; i++) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);

			/* First one must include the number
			 * of SGE in the list
			 */
			if (!i)
				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
					  wr->num_sge);

			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
				  wr->sg_list[i].lkey);

			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
				   wr->sg_list[i].length, flags);
		}

		/* Special case of no sges. FW requires between 1-4 sges...
		 * in this case we need to post 1 sge with length zero. this is
		 * because rdma write with immediate consumes an RQ.
		 */
		if (!wr->num_sge) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);

			/* First one must include the number
			 * of SGE in the list
			 */
			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);

			RQ_SGE_SET(rqe, 0, 0, flags);
			i = 1;
		}

		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;

		qedr_inc_sw_prod(&qp->rq);

		/* qp->rqe_wr_id is accessed during qedr_poll_cq, as
		 * soon as we give the doorbell, we could get a completion
		 * for this wr, therefore we need to make sure that the
		 * memory is updated before giving the doorbell.
		 * During qedr_poll_cq, rmb is called before accessing the
		 * cqe. This covers for the smp_rmb as well.
		 */
		smp_wmb();

		qp->rq.db_data.data.value++;

		writel(qp->rq.db_data.raw, qp->rq.db);

		if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
			writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
		}

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return status;
}
static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
		cq->pbl_toggle;
}

static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;
	struct qedr_qp *qp;

	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
						   resp_cqe->qp_handle.lo,
						   u64);
	return qp;
}

static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
}

/* Return latest CQE (needs processing) */
static union rdma_cqe *get_cqe(struct qedr_cq *cq)
{
	return cq->latest_cqe;
}
/* In FMR we need to increase the completed-FMR counter for the algorithm
 * that determines whether a PBL can be freed or not.
 * We need to perform this whether the work request was signaled or not; for
 * this purpose we call this function from the condition that checks if a WR
 * should be skipped, to make sure we don't miss it (possibly this FMR
 * operation was not signaled).
 */
static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
{
	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
}
static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
		       struct qedr_cq *cq, int num_entries,
		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
		       int force)
{
	u16 cnt = 0;

	while (num_entries && qp->sq.wqe_cons != hw_cons) {
		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
			qedr_chk_if_fmr(qp);
			/* skip WC */
			goto next_cqe;
		}

		/* fill WC */
		wc->status = status;
		wc->vendor_err = 0;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->qp = &qp->ibqp;

		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;

		switch (wc->opcode) {
		case IB_WC_RDMA_WRITE:
			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
			break;
		case IB_WC_COMP_SWAP:
		case IB_WC_FETCH_ADD:
			wc->byte_len = 8;
			break;
		case IB_WC_REG_MR:
			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
			break;
		case IB_WC_RDMA_READ:
		case IB_WC_SEND:
			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
			break;
		default:
			break;
		}

		num_entries--;
		wc++;
		cnt++;
next_cqe:
		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
			qed_chain_consume(&qp->sq.pbl);
		qedr_inc_sw_cons(&qp->sq);
	}

	return cnt;
}
static int qedr_poll_cq_req(struct qedr_dev *dev,
			    struct qedr_qp *qp, struct qedr_cq *cq,
			    int num_entries, struct ib_wc *wc,
			    struct rdma_cqe_requester *req)
{
	int cnt = 0;

	switch (req->status) {
	case RDMA_CQE_REQ_STS_OK:
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_SUCCESS, 0);
		break;
	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
		if (qp->state != QED_ROCE_QP_STATE_ERR)
			DP_DEBUG(dev, QEDR_MSG_CQ,
				 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				 cq->icid, qp->icid);
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_WR_FLUSH_ERR, 1);
		break;
	default:
		/* process all WQEs before the consumer */
		qp->state = QED_ROCE_QP_STATE_ERR;
		cnt = process_req(dev, qp, cq, num_entries, wc,
				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
		wc += cnt;
		/* if we have extra WC fill it with actual error info */
		if (cnt < num_entries) {
			enum ib_wc_status wc_status;

			switch (req->status) {
			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_BAD_RESP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_LEN_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_QP_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_PROT_ERR;
				break;
			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_MW_BIND_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
				break;
			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RETRY_EXC_ERR;
				break;
			default:
				DP_ERR(dev,
				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_GENERAL_ERR;
			}
			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
					   wc_status, 1 /* force use of WC */);
		}
	}

	return cnt;
}
static inline int qedr_cqe_resp_status_to_ib(u8 status)
{
	switch (status) {
	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
		return IB_WC_LOC_ACCESS_ERR;
	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_MW_BIND_ERR;
	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_RD_REQ_ERR;
	case RDMA_CQE_RESP_STS_OK:
		return IB_WC_SUCCESS;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
					  struct ib_wc *wc)
{
	wc->status = IB_WC_SUCCESS;
	wc->byte_len = le32_to_cpu(resp->length);

	if (resp->flags & QEDR_RESP_IMM) {
		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
		wc->wc_flags |= IB_WC_WITH_IMM;

		if (resp->flags & QEDR_RESP_RDMA)
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;

		if (resp->flags & QEDR_RESP_INV)
			return -EINVAL;

	} else if (resp->flags & QEDR_RESP_INV) {
		wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;

		if (resp->flags & QEDR_RESP_RDMA)
			return -EINVAL;

	} else if (resp->flags & QEDR_RESP_RDMA) {
		return -EINVAL;
	}

	return 0;
}
static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			       struct qedr_cq *cq, struct ib_wc *wc,
			       struct rdma_cqe_responder *resp, u64 wr_id)
{
	/* Must fill fields before qedr_set_ok_cqe_resp_wc() */
	wc->opcode = IB_WC_RECV;
	wc->wc_flags = 0;

	if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
		if (qedr_set_ok_cqe_resp_wc(resp, wc))
			DP_ERR(dev,
			       "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
			       cq, cq->icid, resp->flags);
	} else {
		wc->status = qedr_cqe_resp_status_to_ib(resp->status);
		if (wc->status == IB_WC_GENERAL_ERR)
			DP_ERR(dev,
			       "CQ %p (icid=%d) contains an invalid CQE status %d\n",
			       cq, cq->icid, resp->status);
	}

	/* Fill the rest of the WC */
	wc->vendor_err = 0;
	wc->src_qp = qp->id;
	wc->qp = &qp->ibqp;
	wc->wr_id = wr_id;
}
static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
				struct qedr_cq *cq, struct ib_wc *wc,
				struct rdma_cqe_responder *resp)
{
	struct qedr_srq *srq = qp->srq;
	u64 wr_id;

	wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
			 le32_to_cpu(resp->srq_wr_id.lo), u64);

	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = 0;
		wc->byte_len = 0;
		wc->src_qp = qp->id;
		wc->qp = &qp->ibqp;
		wc->wr_id = wr_id;
	} else {
		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
	}
	atomic_inc(&srq->hw_srq.wr_cons_cnt);

	return 1;
}
static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			    struct qedr_cq *cq, struct ib_wc *wc,
			    struct rdma_cqe_responder *resp)
{
	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;

	__process_resp_one(dev, qp, cq, wc, resp, wr_id);

	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
		qed_chain_consume(&qp->rq.pbl);
	qedr_inc_sw_cons(&qp->rq);

	return 1;
}
static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
			      int num_entries, struct ib_wc *wc, u16 hw_cons)
{
	u16 cnt = 0;

	while (num_entries && qp->rq.wqe_cons != hw_cons) {
		/* fill WC */
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = 0;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->byte_len = 0;
		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc->qp = &qp->ibqp;
		num_entries--;
		wc++;
		cnt++;
		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
			qed_chain_consume(&qp->rq.pbl);
		qedr_inc_sw_cons(&qp->rq);
	}

	return cnt;
}
static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				 struct rdma_cqe_responder *resp, int *update)
{
	if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}

static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
				 struct qedr_cq *cq, int num_entries,
				 struct ib_wc *wc,
				 struct rdma_cqe_responder *resp)
{
	int cnt;

	cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
	consume_cqe(cq);

	return cnt;
}

static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
			     struct qedr_cq *cq, int num_entries,
			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
			     int *update)
{
	int cnt;

	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
		cnt = process_resp_flush(qp, cq, num_entries, wc,
					 resp->rq_cons_or_srq_id);
		try_consume_resp_cqe(cq, qp, resp, update);
	} else {
		cnt = process_resp_one(dev, qp, cq, wc, resp);
		consume_cqe(cq);
		*update |= 1;
	}

	return cnt;
}

static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				struct rdma_cqe_requester *req, int *update)
{
	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}
int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	union rdma_cqe *cqe;
	u32 old_cons, new_cons;
	unsigned long flags;
	int update = 0;
	int done = 0;

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return 0;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return qedr_gsi_poll_cq(ibcq, num_entries, wc);

	spin_lock_irqsave(&cq->cq_lock, flags);
	cqe = cq->latest_cqe;
	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	while (num_entries && is_valid_cqe(cq, cqe)) {
		struct qedr_qp *qp;
		int cnt = 0;

		/* prevent speculative reads of any field of CQE */
		rmb();

		qp = cqe_get_qp(cqe);
		if (!qp) {
			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
			break;
		}

		wc->qp = &qp->ibqp;

		switch (cqe_get_type(cqe)) {
		case RDMA_CQE_TYPE_REQUESTER:
			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
					       &cqe->req);
			try_consume_req_cqe(cq, qp, &cqe->req, &update);
			break;
		case RDMA_CQE_TYPE_RESPONDER_RQ:
			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
						&cqe->resp, &update);
			break;
		case RDMA_CQE_TYPE_RESPONDER_SRQ:
			cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
						    wc, &cqe->resp);
			update = 1;
			break;
		case RDMA_CQE_TYPE_INVALID:
		default:
			DP_ERR(dev, "Error: invalid CQE type = %d\n",
			       cqe_get_type(cqe));
		}
		num_entries -= cnt;
		wc += cnt;
		done += cnt;

		cqe = get_cqe(cq);
	}
	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

	cq->cq_cons += new_cons - old_cons;

	if (update)
		/* doorbell notifies about the latest VALID entry,
		 * but the chain already points to the next INVALID one
		 */
		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return done;
}
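/* Poll flow recap for qedr_poll_cq(): the PBL chain consumer delta
 * (new_cons - old_cons) counts how many CQEs were consumed this call,
 * cq_cons advances by that amount, and the doorbell (rung only when some
 * CQE was consumed) reports cq_cons - 1 because the chain already points
 * at the next, still-invalid entry.
 */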
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
		     u32 port_num, const struct ib_wc *in_wc,
		     const struct ib_grh *in_grh, const struct ib_mad *in,
		     struct ib_mad *out_mad, size_t *out_mad_size,
		     u16 *out_mad_pkey_index)
{
	return IB_MAD_RESULT_SUCCESS;
}