RDMA/bnxt_re: Use rdma_umem_for_each_dma_block()
author    Jason Gunthorpe <jgg@nvidia.com>
          Wed, 30 Sep 2020 00:24:35 +0000 (21:24 -0300)
committer Jason Gunthorpe <jgg@nvidia.com>
          Tue, 6 Oct 2020 19:45:53 +0000 (16:45 -0300)
This driver is taking the SGL out of the umem and passing it through a
struct bnxt_qplib_sg_info. Instead of passing the SGL, pass the umem and
then use rdma_umem_for_each_dma_block() directly.
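
For reference, the change swaps the raw scatterlist walk for the core
DMA block iterator. A minimal before/after sketch (the identifiers
mirror bnxt_qplib_fill_user_dma_pages() in the hunks below):

	/* Before: walk the SGL pulled out of the umem by hand */
	struct sg_dma_page_iter sg_iter;

	for_each_sg_dma_page(sginfo->sghead, &sg_iter, sginfo->nmap, 0)
		pbl->pg_map_arr[i++] = sg_page_iter_dma_address(&sg_iter);

	/* After: let the RDMA core hand back pgsize-aligned DMA blocks */
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize)
		pbl->pg_map_arr[i++] = rdma_block_iter_dma_address(&biter);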

Move the calls to ib_umem_num_dma_blocks() closer to their actual point of
use; npages is only set for non-umem pbl flows.
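
With the umem stored in the sg_info, __alloc_pbl() can derive the page
count on demand. ib_umem_num_dma_blocks() divides the pgsz-aligned IOVA
span of the umem by the block size; at the time of this series the core
helper in include/rdma/ib_umem.h is roughly:

	static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
						    unsigned long pgsz)
	{
		return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
				 ALIGN_DOWN(umem->iova, pgsz))) / pgsz;
	}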

Link: https://lore.kernel.org/r/0-v1-b37437a73f35+49c-bnxt_re_dma_block_jgg@nvidia.com
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Tested-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_res.h

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a0e8d93..e2707b2 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -940,9 +940,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
                return PTR_ERR(umem);
 
        qp->sumem = umem;
-       qplib_qp->sq.sg_info.sghead = umem->sg_head.sgl;
-       qplib_qp->sq.sg_info.npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-       qplib_qp->sq.sg_info.nmap = umem->nmap;
+       qplib_qp->sq.sg_info.umem = umem;
        qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
        qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
        qplib_qp->qp_handle = ureq.qp_handle;
@@ -955,10 +953,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
                if (IS_ERR(umem))
                        goto rqfail;
                qp->rumem = umem;
-               qplib_qp->rq.sg_info.sghead = umem->sg_head.sgl;
-               qplib_qp->rq.sg_info.npages =
-                       ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-               qplib_qp->rq.sg_info.nmap = umem->nmap;
+               qplib_qp->rq.sg_info.umem = umem;
                qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
                qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
        }
@@ -1612,9 +1607,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
                return PTR_ERR(umem);
 
        srq->umem = umem;
-       qplib_srq->sg_info.sghead = umem->sg_head.sgl;
-       qplib_srq->sg_info.npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-       qplib_srq->sg_info.nmap = umem->nmap;
+       qplib_srq->sg_info.umem = umem;
        qplib_srq->sg_info.pgsize = PAGE_SIZE;
        qplib_srq->sg_info.pgshft = PAGE_SHIFT;
        qplib_srq->srq_handle = ureq.srq_handle;
@@ -2865,10 +2858,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                        rc = PTR_ERR(cq->umem);
                        goto fail;
                }
-               cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl;
-               cq->qplib_cq.sg_info.npages =
-                       ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
-               cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
+               cq->qplib_cq.sg_info.umem = cq->umem;
                cq->qplib_cq.dpi = &uctx->dpi;
        } else {
                cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 7efa6e5..fa78783 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -45,6 +45,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+
 #include "roce_hsi.h"
 #include "qplib_res.h"
 #include "qplib_sp.h"
@@ -87,12 +90,11 @@ static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
 static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
                                           struct bnxt_qplib_sg_info *sginfo)
 {
-       struct scatterlist *sghead = sginfo->sghead;
-       struct sg_dma_page_iter sg_iter;
+       struct ib_block_iter biter;
        int i = 0;
 
-       for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
-               pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+       rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
+               pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
                pbl->pg_arr[i] = NULL;
                pbl->pg_count++;
                i++;
@@ -104,15 +106,16 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
                       struct bnxt_qplib_sg_info *sginfo)
 {
        struct pci_dev *pdev = res->pdev;
-       struct scatterlist *sghead;
        bool is_umem = false;
        u32 pages;
        int i;
 
        if (sginfo->nopte)
                return 0;
-       pages = sginfo->npages;
-       sghead = sginfo->sghead;
+       if (sginfo->umem)
+               pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
+       else
+               pages = sginfo->npages;
        /* page ptr arrays */
        pbl->pg_arr = vmalloc(pages * sizeof(void *));
        if (!pbl->pg_arr)
@@ -127,7 +130,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
        pbl->pg_count = 0;
        pbl->pg_size = sginfo->pgsize;
 
-       if (!sghead) {
+       if (!sginfo->umem) {
                for (i = 0; i < pages; i++) {
                        pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
                                                            pbl->pg_size,
@@ -183,14 +186,12 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
        struct bnxt_qplib_sg_info sginfo = {};
        u32 depth, stride, npbl, npde;
        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
-       struct scatterlist *sghead = NULL;
        struct bnxt_qplib_res *res;
        struct pci_dev *pdev;
        int i, rc, lvl;
 
        res = hwq_attr->res;
        pdev = res->pdev;
-       sghead = hwq_attr->sginfo->sghead;
        pg_size = hwq_attr->sginfo->pgsize;
        hwq->level = PBL_LVL_MAX;
 
@@ -204,7 +205,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
                        aux_pages++;
        }
 
-       if (!sghead) {
+       if (!hwq_attr->sginfo->umem) {
                hwq->is_user = false;
                npages = (depth * stride) / pg_size + aux_pages;
                if ((depth * stride) % pg_size)
@@ -213,11 +214,14 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
                        return -EINVAL;
                hwq_attr->sginfo->npages = npages;
        } else {
+               unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
+                       hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
+
                hwq->is_user = true;
-               npages = hwq_attr->sginfo->npages;
+               npages = sginfo_num_pages;
                npages = (npages * PAGE_SIZE) /
                          BIT_ULL(hwq_attr->sginfo->pgshft);
-               if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
+               if ((sginfo_num_pages * PAGE_SIZE) %
                     BIT_ULL(hwq_attr->sginfo->pgshft))
                        if (!npages)
                                npages++;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 9da470d..7a1ab38 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -126,8 +126,7 @@ struct bnxt_qplib_pbl {
 };
 
 struct bnxt_qplib_sg_info {
-       struct scatterlist              *sghead;
-       u32                             nmap;
+       struct ib_umem                  *umem;
        u32                             npages;
        u32                             pgshft;
        u32                             pgsize;
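
Taken together, a user-mapped queue now flows as below. This is a
condensed sketch stitched from the hunks above, with error handling
omitted and the ib_umem_get() arguments paraphrased from
bnxt_re_init_user_qp():

	/* verbs layer: hand the whole umem to qplib */
	umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE);
	qplib_qp->sq.sg_info.umem = umem;
	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;

	/* qplib: size and fill the PBL straight from the umem */
	pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize)
		pbl->pg_map_arr[i++] = rdma_block_iter_dma_address(&biter);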