staging: kpc2000: kpc_dma: Remove additional goto statements
authorSouptick Joarder <jrdr.linux@gmail.com>
Wed, 1 Jul 2020 06:17:44 +0000 (11:47 +0530)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 1 Jul 2020 13:44:26 +0000 (15:44 +0200)
All three goto labels refer to the same common cleanup code, so
they can be consolidated into a single label, renamed to
unpin_pages. Also set the -EFAULT error code for a partial pin at
the point of failure, which is a more appropriate place than the
unwind path.

When dma_map_sg() failed, the previously allocated sg_table was not
freed. This is now corrected by jumping to the new free_table label.
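
For readers skimming the diff below, here is a minimal standalone
sketch of the error-unwind shape the patch arrives at. The function
and device names (my_map_transfer, my_dev) and the -EIO chosen for
the dma_map_sg() failure are hypothetical; only the nr_pages
bookkeeping, the placement of -EFAULT, and the free_table/unpin_pages
label ordering mirror the actual change:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Hypothetical device wrapper, for illustration only. */
struct my_dev {
	struct device *dma_dev;
};

static int my_map_transfer(struct my_dev *dev, unsigned long uaddr,
			   size_t len, struct page **pages,
			   int page_count, struct sg_table *sgt)
{
	int rv, nr_pages = 0;

	/* Pin the user pages backing the buffer. */
	mmap_read_lock(current->mm);
	rv = pin_user_pages(uaddr, page_count, FOLL_TOUCH | FOLL_WRITE,
			    pages, NULL);
	mmap_read_unlock(current->mm);
	if (rv != page_count) {
		nr_pages = rv;		/* unpin only what was actually pinned */
		if (rv > 0)
			rv = -EFAULT;	/* partial pin: set errno at the failure site */
		goto unpin_pages;
	}
	nr_pages = page_count;

	/* Build the scatterlist over the pinned pages. */
	rv = sg_alloc_table_from_pages(sgt, pages, page_count,
				       uaddr & (PAGE_SIZE - 1), len,
				       GFP_KERNEL);
	if (rv)
		goto unpin_pages;	/* only the pin needs undoing here */

	/* Map the scatterlist for DMA. */
	if (dma_map_sg(dev->dma_dev, sgt->sgl, sgt->nents,
		       DMA_TO_DEVICE) <= 0) {
		rv = -EIO;		/* illustrative; the driver logs the count */
		goto free_table;	/* sg_table is now freed on this path too */
	}

	return 0;

free_table:
	sg_free_table(sgt);
unpin_pages:
	if (nr_pages > 0)
		unpin_user_pages(pages, nr_pages);
	return rv;
}

Because the labels fall through in reverse order of acquisition, each
failure point jumps to the first label that releases everything
acquired so far, which is what closes the sg_table leak.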

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Bharath Vedartham <linux.bhar@gmail.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Link: https://lore.kernel.org/r/1593584264-16982-5-git-send-email-jrdr.linux@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/kpc2000/kpc_dma/fileops.c

index 8cd20ad..dd716ed 100644
@@ -35,7 +35,7 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
                            unsigned long iov_base, size_t iov_len)
 {
        unsigned int i = 0;
-       int rv = 0;
+       int rv = 0, nr_pages = 0;
        struct kpc_dma_device *ldev;
        struct aio_cb_data *acd;
        DECLARE_COMPLETION_ONSTACK(done);
@@ -79,22 +79,27 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
        rv = pin_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE, acd->user_pages, NULL);
        mmap_read_unlock(current->mm);        /*  release the semaphore */
        if (rv != acd->page_count) {
+               nr_pages = rv;
+               if (rv > 0)
+                       rv = -EFAULT;
+
                dev_err(&priv->ldev->pldev->dev, "Couldn't pin_user_pages (%d)\n", rv);
-               goto err_get_user_pages;
+               goto unpin_pages;
        }
+       nr_pages = acd->page_count;
 
        // Allocate and setup the sg_table (scatterlist entries)
        rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL);
        if (rv) {
                dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%d)\n", rv);
-               goto err_alloc_sg_table;
+               goto unpin_pages;
        }
 
        // Setup the DMA mapping for all the sg entries
        acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
        if (acd->mapped_entry_count <= 0) {
                dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n", acd->mapped_entry_count);
-               goto err_dma_map_sg;
+               goto free_table;
        }
 
        // Calculate how many descriptors are actually needed for this transfer.
@@ -186,16 +191,12 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
  err_descr_too_many:
        unlock_engine(ldev);
        dma_unmap_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
+ free_table:
        sg_free_table(&acd->sgt);
- err_dma_map_sg:
- err_alloc_sg_table:
-       unpin_user_pages(acd->user_pages, acd->page_count);
 
- err_get_user_pages:
-       if (rv > 0) {
-               unpin_user_pages(acd->user_pages, rv);
-               rv = -EFAULT;
-       }
+ unpin_pages:
+       if (nr_pages > 0)
+               unpin_user_pages(acd->user_pages, nr_pages);
        kfree(acd->user_pages);
  err_alloc_userpages:
        kfree(acd);