RDMA/rxe: Fix memory leak in error path code
author Bob Pearson <rpearsonhpe@gmail.com>
Mon, 5 Jul 2021 16:41:54 +0000 (11:41 -0500)
committer Jason Gunthorpe <jgg@nvidia.com>
Thu, 15 Jul 2021 17:44:12 +0000 (14:44 -0300)
In rxe_mr_init_user(), the third error path fails to free the memory
allocated at mr->map. This patch adds code to do that. The error only
occurs if page_address() fails to return a non-zero address, which
should never happen on 64-bit architectures.
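
For reference, the cleanup added by this patch implies that mr->map is a
table of pointers, each entry pointing to a separately allocated map
block, so undoing the allocation means freeing every block before
freeing the table itself. A minimal sketch of that two-level allocation
and its unwind, using hypothetical demo_* names rather than the driver's
real definitions:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_map {
        u64 addr[8];            /* per-buffer entries; size is arbitrary here */
};

struct demo_mr {
        int num_map;
        struct demo_map **map;  /* table of pointers to map blocks */
};

static int demo_mr_alloc(struct demo_mr *mr, int num_map)
{
        int i;

        mr->map = kcalloc(num_map, sizeof(*mr->map), GFP_KERNEL);
        if (!mr->map)
                return -ENOMEM;

        for (i = 0; i < num_map; i++) {
                mr->map[i] = kzalloc(sizeof(*mr->map[i]), GFP_KERNEL);
                if (!mr->map[i])
                        goto err_free;  /* unwind the blocks allocated so far */
        }
        mr->num_map = num_map;
        return 0;

err_free:
        while (--i >= 0)
                kfree(mr->map[i]);
        kfree(mr->map);
        mr->map = NULL;
        return -ENOMEM;
}

Before this patch, a later failure in rxe_mr_init_user() (the NULL vaddr
check) returned without performing the equivalent of err_free above,
leaking both the table and every map block.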

Fixes: 8700e3e7c485 ("Soft RoCE driver")
Link: https://lore.kernel.org/r/20210705164153.17652-1-rpearsonhpe@gmail.com
Reported-by: Haakon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Reviewed-by: Zhu Yanjun <zyjzyj2000@gmail.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/sw/rxe/rxe_mr.c

index 6aabcb4..be4bcb4 100644
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
        int                     num_buf;
        void                    *vaddr;
        int err;
+       int i;
 
        umem = ib_umem_get(pd->ibpd.device, start, length, access);
        if (IS_ERR(umem)) {
-               pr_warn("err %d from rxe_umem_get\n",
-                       (int)PTR_ERR(umem));
+               pr_warn("%s: Unable to pin memory region err = %d\n",
+                       __func__, (int)PTR_ERR(umem));
                err = PTR_ERR(umem);
-               goto err1;
+               goto err_out;
        }
 
        mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
        err = rxe_mr_alloc(mr, num_buf);
        if (err) {
-               pr_warn("err %d from rxe_mr_alloc\n", err);
-               ib_umem_release(umem);
-               goto err1;
+               pr_warn("%s: Unable to allocate memory for map\n",
+                               __func__);
+               goto err_release_umem;
        }
 
        mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
                        vaddr = page_address(sg_page_iter_page(&sg_iter));
                        if (!vaddr) {
-                               pr_warn("null vaddr\n");
-                               ib_umem_release(umem);
+                               pr_warn("%s: Unable to get virtual address\n",
+                                               __func__);
                                err = -ENOMEM;
-                               goto err1;
+                               goto err_cleanup_map;
                        }
 
                        buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
        return 0;
 
-err1:
+err_cleanup_map:
+       for (i = 0; i < mr->num_map; i++)
+               kfree(mr->map[i]);
+       kfree(mr->map);
+err_release_umem:
+       ib_umem_release(umem);
+err_out:
        return err;
 }
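
The renamed labels follow the usual kernel convention of one descriptive
label per acquired resource, placed in reverse order of acquisition, so
each failure site jumps to exactly the cleanup it needs and falls
through the rest. A generic, self-contained sketch of that shape, with a
hypothetical demo_setup() and plain kzalloc() steps standing in for the
rxe-specific ones:

#include <linux/slab.h>
#include <linux/errno.h>

/* Three-step setup: each label below releases exactly the resources
 * acquired before the point that jumped to it. */
static int demo_setup(void **out_a, void **out_b, void **out_c)
{
        void *a, *b, *c;
        int err;

        a = kzalloc(64, GFP_KERNEL);
        if (!a) {
                err = -ENOMEM;
                goto err_out;           /* nothing to undo yet */
        }

        b = kzalloc(64, GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto err_free_a;        /* undo step 1 only */
        }

        c = kzalloc(64, GFP_KERNEL);
        if (!c) {
                err = -ENOMEM;
                goto err_free_b;        /* undo steps 2 and 1 */
        }

        *out_a = a;
        *out_b = b;
        *out_c = c;
        return 0;

err_free_b:
        kfree(b);
err_free_a:
        kfree(a);
err_out:
        return err;
}

Compared with the old single err1 label, the split into err_cleanup_map,
err_release_umem and err_out also makes it harder to reintroduce this
kind of leak when new failure points are added.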