dma-mapping: don't return errors from dma_set_max_seg_size
authorChristoph Hellwig <hch@lst.de>
Fri, 19 Jul 2024 04:07:38 +0000 (06:07 +0200)
committerChristoph Hellwig <hch@lst.de>
Thu, 29 Aug 2024 04:22:49 +0000 (07:22 +0300)
A NULL dev->dma_parms indicates either a bus that is not DMA capable or a
grave bug in the implementation of the bus code.

There isn't much the driver can do in terms of error handling for either
case, so just warn and continue as DMA operations will fail anyway.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Ulf Hansson <ulf.hansson@linaro.org> # For MMC
13 files changed:
drivers/accel/qaic/qaic_drv.c
drivers/dma/idma64.c
drivers/dma/pl330.c
drivers/dma/qcom/bam_dma.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/ste_dma40.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/media/common/videobuf2/videobuf2-dma-contig.c
drivers/media/pci/intel/ipu6/ipu6.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/scsi/lpfc/lpfc_init.c
include/linux/dma-mapping.h

index 580b29e..bf10156 100644 (file)
@@ -447,9 +447,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                return ret;
-       ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-       if (ret)
-               return ret;
+       dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
        qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
        if (IS_ERR(qdev->bar_0))
index e3505e5..1398814 100644 (file)
@@ -598,9 +598,7 @@ static int idma64_probe(struct idma64_chip *chip)
 
        idma64->dma.dev = chip->sysdev;
 
-       ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
-       if (ret)
-               return ret;
+       dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
 
        ret = dma_async_device_register(&idma64->dma);
        if (ret)
index 60c4de8..82a9fe8 100644 (file)
@@ -3163,10 +3163,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
         * This is the limit for transfers with a buswidth of 1, larger
         * buswidths will have larger limits.
         */
-       ret = dma_set_max_seg_size(&adev->dev, 1900800);
-       if (ret)
-               dev_err(&adev->dev, "unable to set the seg size\n");
-
+       dma_set_max_seg_size(&adev->dev, 1900800);
 
        init_pl330_debugfs(pl330);
        dev_info(&adev->dev,
index 5e7d332..368ffaa 100644 (file)
@@ -1325,11 +1325,7 @@ static int bam_dma_probe(struct platform_device *pdev)
 
        /* set max dma segment size */
        bdev->common.dev = bdev->dev;
-       ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
-       if (ret) {
-               dev_err(bdev->dev, "cannot set maximum segment size\n");
-               goto err_bam_channel_exit;
-       }
+       dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
 
        platform_set_drvdata(pdev, bdev);
 
index 40482cb..1094a2f 100644 (file)
@@ -1868,9 +1868,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 
        dmac->dev = &pdev->dev;
        platform_set_drvdata(pdev, dmac);
-       ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
-       if (ret)
-               return ret;
+       dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
 
        ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
        if (ret)
index 2c48929..d52e168 100644 (file)
@@ -3632,11 +3632,7 @@ static int __init d40_probe(struct platform_device *pdev)
        if (ret)
                goto destroy_cache;
 
-       ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
-       if (ret) {
-               d40_err(dev, "Failed to set dma max seg size\n");
-               goto destroy_cache;
-       }
+       dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
 
        d40_hw_init(base);
 
index 77b50c5..3e80719 100644 (file)
@@ -559,11 +559,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
         * Configure the DMA segment size to make sure we get contiguous IOVA
         * when importing PRIME buffers.
         */
-       ret = dma_set_max_seg_size(dma_dev, UINT_MAX);
-       if (ret) {
-               dev_err(dma_dev, "Failed to set DMA segment size\n");
-               goto err_component_unbind;
-       }
+       dma_set_max_seg_size(dma_dev, UINT_MAX);
 
        ret = drm_vblank_init(drm, MAX_CRTC);
        if (ret < 0)
index 3d4fd4e..bb0b7fa 100644 (file)
@@ -854,8 +854,7 @@ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
                return -ENODEV;
        }
        if (dma_get_max_seg_size(dev) < size)
-               return dma_set_max_seg_size(dev, size);
-
+               dma_set_max_seg_size(dev, size);
        return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
index bbd6463..83e70c6 100644 (file)
@@ -576,9 +576,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (ret)
                return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
 
-       ret = dma_set_max_seg_size(dev, UINT_MAX);
-       if (ret)
-               return dev_err_probe(dev, ret, "Failed to set max_seg_size\n");
+       dma_set_max_seg_size(dev, UINT_MAX);
 
        ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
        if (ret)
index f5da7f9..9dc5185 100644 (file)
@@ -213,7 +213,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
                host->mmc->max_seg_size = host->mmc->max_req_size;
        }
 
-       return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+       dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+       return 0;
 }
 
 static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
index ddb8f68..ca4ed58 100644 (file)
@@ -1496,11 +1496,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto release_region;
 
-       err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to set dma device segment size\n");
-               goto release_region;
-       }
+       dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
        err = -ENOMEM;
        gc = vzalloc(sizeof(*gc));
index e1dfa96..5062091 100644 (file)
@@ -13861,12 +13861,7 @@ fcponly:
        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
 
-       rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
-       if (unlikely(rc)) {
-               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "6400 Can't set dma maximum segment size\n");
-               return rc;
-       }
+       dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
 
        /*
         * Check whether the adapter supports an embedded copy of the
index 6bd1333..1524da3 100644 (file)
@@ -524,13 +524,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
        return SZ_64K;
 }
 
-static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
 {
-       if (dev->dma_parms) {
-               dev->dma_parms->max_segment_size = size;
-               return 0;
-       }
-       return -EIO;
+       if (WARN_ON_ONCE(!dev->dma_parms))
+               return;
+       dev->dma_parms->max_segment_size = size;
 }
 
 static inline unsigned long dma_get_seg_boundary(struct device *dev)