Merge tag 'dmaengine-fix-5.2-rc4' of git://git.infradead.org/users/vkoul/slave-dma
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 8 Jun 2019 19:46:31 +0000 (12:46 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 8 Jun 2019 19:46:31 +0000 (12:46 -0700)
Pull dmaengine fixes from Vinod Koul:

 - jz4780: fix transfers being ACKed too soon

 - fsl-qdma: clean registers on error

 - dw-axi-dmac: null pointer dereference fix

 - mediatek-cqdma: fix sleeping in atomic context

 - tegra210-adma: fix a bunch of issues like a crash during driver probe,
   channel FIFO configuration, etc.

 - sprd: fixes for a possible crash when getting descriptor status and for
   block length overflow; for 2-stage transfers, fix the incorrect start,
   configuration and interrupt handling.

* tag 'dmaengine-fix-5.2-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: sprd: Add interrupt support for 2-stage transfer
  dmaengine: sprd: Fix the right place to configure 2-stage transfer
  dmaengine: sprd: Fix block length overflow
  dmaengine: sprd: Fix the incorrect start for 2-stage destination channels
  dmaengine: sprd: Add validation of current descriptor in irq handler
  dmaengine: sprd: Fix the possible crash when getting descriptor status
  dmaengine: tegra210-adma: Fix spelling
  dmaengine: tegra210-adma: Fix channel FIFO configuration
  dmaengine: tegra210-adma: Fix crash during probe
  dmaengine: mediatek-cqdma: sleeping in atomic context
  dmaengine: dw-axi-dmac: fix null dereference when pointer first is null
  dmaengine: fsl-qdma: Add improvement
  dmaengine: jz4780: Fix transfers being ACKed too soon

drivers/dma/dma-jz4780.c
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/fsl-qdma.c
drivers/dma/mediatek/mtk-cqdma.c
drivers/dma/sprd-dma.c
drivers/dma/tegra210-adma.c

drivers/dma/dma-jz4780.c
index 7204fde..263bee7 100644
@@ -662,10 +662,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
        return status;
 }
 
-static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
-       struct jz4780_dma_chan *jzchan)
+static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+                               struct jz4780_dma_chan *jzchan)
 {
        uint32_t dcs;
+       bool ack = true;
 
        spin_lock(&jzchan->vchan.lock);
 
@@ -688,12 +689,20 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
                if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
                        if (jzchan->desc->type == DMA_CYCLIC) {
                                vchan_cyclic_callback(&jzchan->desc->vdesc);
-                       } else {
+
+                               jz4780_dma_begin(jzchan);
+                       } else if (dcs & JZ_DMA_DCS_TT) {
                                vchan_cookie_complete(&jzchan->desc->vdesc);
                                jzchan->desc = NULL;
-                       }
 
-                       jz4780_dma_begin(jzchan);
+                               jz4780_dma_begin(jzchan);
+                       } else {
+                               /* False positive - continue the transfer */
+                               ack = false;
+                               jz4780_dma_chn_writel(jzdma, jzchan->id,
+                                                     JZ_DMA_REG_DCS,
+                                                     JZ_DMA_DCS_CTE);
+                       }
                }
        } else {
                dev_err(&jzchan->vchan.chan.dev->device,
@@ -701,21 +710,22 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
        }
 
        spin_unlock(&jzchan->vchan.lock);
+
+       return ack;
 }
 
 static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
 {
        struct jz4780_dma_dev *jzdma = data;
+       unsigned int nb_channels = jzdma->soc_data->nb_channels;
        uint32_t pending, dmac;
        int i;
 
        pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
 
-       for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
-               if (!(pending & (1<<i)))
-                       continue;
-
-               jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
+       for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
+               if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
+                       pending &= ~BIT(i);
        }
 
        /* Clear halt and address error status of all channels. */
@@ -724,7 +734,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
 
        /* Clear interrupt pending status. */
-       jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+       jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);
 
        return IRQ_HANDLED;
 }
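
The point of the change above is that the handler now acknowledges only the channels whose interrupt it actually serviced, writing the remaining bits back to DIRQP instead of blindly clearing everything. A minimal userspace sketch of that idea, using a made-up pending word and handler rather than the driver's real registers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend channel 0 completed for real and channel 2 was a false positive. */
static bool handle_channel(int chan)
{
        return chan == 0;               /* true means "safe to acknowledge" */
}

int main(void)
{
        uint32_t pending = 0x05;        /* channels 0 and 2 raised an interrupt */

        for (int i = 0; i < 32; i++) {
                if (!(pending & (1u << i)))
                        continue;
                if (handle_channel(i))
                        pending &= ~(1u << i);  /* drop only the acked channels */
        }

        /*
         * Writing 'pending' back (rather than 0) leaves the false positive
         * latched, so a later interrupt can still complete that transfer.
         */
        printf("still pending: %#x\n", (unsigned int)pending);
        return 0;
}
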
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index b2ac1d2..a1ce307 100644
@@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
        return vchan_tx_prep(&chan->vc, &first->vd, flags);
 
 err_desc_get:
-       axi_desc_put(first);
+       if (first)
+               axi_desc_put(first);
        return NULL;
 }
 
drivers/dma/fsl-qdma.c
index aa1d0ae..60b062c 100644
@@ -701,10 +701,8 @@ static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
 
        intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
 
-       if (intr) {
+       if (intr)
                dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
-               return IRQ_NONE;
-       }
 
        qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
        return IRQ_HANDLED;
drivers/dma/mediatek/mtk-cqdma.c
index 8148538..723b11c 100644
@@ -225,7 +225,7 @@ static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
        mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
        mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
 
-       return mtk_cqdma_poll_engine_done(pc, false);
+       return mtk_cqdma_poll_engine_done(pc, true);
 }
 
 static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
@@ -671,7 +671,7 @@ static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
                mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
 
                /* wait for the completion of flush operation */
-               if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0)
+               if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
                        dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");
 
                /* clear the flush bit and interrupt flag */
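
Both call sites above run under the channel's spinlock, so the poll helper is now told it is in atomic context and must busy-wait instead of sleeping. A rough standalone sketch of that atomic-versus-sleeping poll split, with an invented helper and flag rather than the driver's actual code:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical "engine done" flag standing in for a hardware status bit. */
static volatile bool engine_done;

static int poll_engine_done(bool atomic)
{
        for (int tries = 0; tries < 1000; tries++) {
                if (engine_done)
                        return 0;
                if (atomic)
                        continue;       /* atomic context: spin, never sleep */
                usleep(10);             /* process context: sleeping is fine */
        }
        return -1;                      /* timed out */
}

int main(void)
{
        engine_done = true;
        printf("poll result: %d\n", poll_engine_done(true));
        return 0;
}
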
drivers/dma/sprd-dma.c
index 48431e2..baac476 100644
@@ -62,6 +62,8 @@
 /* SPRD_DMA_GLB_2STAGE_GRP register definition */
 #define SPRD_DMA_GLB_2STAGE_EN         BIT(24)
 #define SPRD_DMA_GLB_CHN_INT_MASK      GENMASK(23, 20)
+#define SPRD_DMA_GLB_DEST_INT          BIT(22)
+#define SPRD_DMA_GLB_SRC_INT           BIT(20)
 #define SPRD_DMA_GLB_LIST_DONE_TRG     BIT(19)
 #define SPRD_DMA_GLB_TRANS_DONE_TRG    BIT(18)
 #define SPRD_DMA_GLB_BLOCK_DONE_TRG    BIT(17)
 /* define DMA channel mode & trigger mode mask */
 #define SPRD_DMA_CHN_MODE_MASK         GENMASK(7, 0)
 #define SPRD_DMA_TRG_MODE_MASK         GENMASK(7, 0)
+#define SPRD_DMA_INT_TYPE_MASK         GENMASK(7, 0)
 
 /* define the DMA transfer step type */
 #define SPRD_DMA_NONE_STEP             0
@@ -190,6 +193,7 @@ struct sprd_dma_chn {
        u32                     dev_id;
        enum sprd_dma_chn_mode  chn_mode;
        enum sprd_dma_trg_mode  trg_mode;
+       enum sprd_dma_int_type  int_type;
        struct sprd_dma_desc    *cur_desc;
 };
 
@@ -429,6 +433,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
                val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
                val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
                val |= SPRD_DMA_GLB_2STAGE_EN;
+               if (schan->int_type != SPRD_DMA_NO_INT)
+                       val |= SPRD_DMA_GLB_SRC_INT;
+
                sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
                break;
 
@@ -436,6 +443,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
                val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
                val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
                val |= SPRD_DMA_GLB_2STAGE_EN;
+               if (schan->int_type != SPRD_DMA_NO_INT)
+                       val |= SPRD_DMA_GLB_SRC_INT;
+
                sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
                break;
 
@@ -443,6 +453,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
                val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
                        SPRD_DMA_GLB_DEST_CHN_MASK;
                val |= SPRD_DMA_GLB_2STAGE_EN;
+               if (schan->int_type != SPRD_DMA_NO_INT)
+                       val |= SPRD_DMA_GLB_DEST_INT;
+
                sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
                break;
 
@@ -450,6 +463,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
                val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
                        SPRD_DMA_GLB_DEST_CHN_MASK;
                val |= SPRD_DMA_GLB_2STAGE_EN;
+               if (schan->int_type != SPRD_DMA_NO_INT)
+                       val |= SPRD_DMA_GLB_DEST_INT;
+
                sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
                break;
 
@@ -510,7 +526,9 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
        sprd_dma_set_uid(schan);
        sprd_dma_enable_chn(schan);
 
-       if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+       if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
+           schan->chn_mode != SPRD_DMA_DST_CHN0 &&
+           schan->chn_mode != SPRD_DMA_DST_CHN1)
                sprd_dma_soft_request(schan);
 }
 
@@ -552,12 +570,17 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
                schan = &sdev->channels[i];
 
                spin_lock(&schan->vc.lock);
+
+               sdesc = schan->cur_desc;
+               if (!sdesc) {
+                       spin_unlock(&schan->vc.lock);
+                       return IRQ_HANDLED;
+               }
+
                int_type = sprd_dma_get_int_type(schan);
                req_type = sprd_dma_get_req_type(schan);
                sprd_dma_clear_int(schan);
 
-               sdesc = schan->cur_desc;
-
                /* cyclic mode schedule callback */
                cyclic = schan->linklist.phy_addr ? true : false;
                if (cyclic == true) {
@@ -625,7 +648,7 @@ static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
                else
                        pos = 0;
        } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
-               struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
+               struct sprd_dma_desc *sdesc = schan->cur_desc;
 
                if (sdesc->dir == DMA_DEV_TO_MEM)
                        pos = sprd_dma_get_dst_addr(schan);
@@ -771,7 +794,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
        temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
        hw->frg_len = temp;
 
-       hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+       hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
        hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
 
        temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
@@ -904,6 +927,16 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                schan->linklist.virt_addr = 0;
        }
 
+       /*
+        * Set channel mode, interrupt mode and trigger mode for 2-stage
+        * transfer.
+        */
+       schan->chn_mode =
+               (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
+       schan->trg_mode =
+               (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
+       schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK;
+
        sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
        if (!sdesc)
                return NULL;
@@ -937,12 +970,6 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                }
        }
 
-       /* Set channel mode and trigger mode for 2-stage transfer */
-       schan->chn_mode =
-               (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
-       schan->trg_mode =
-               (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
-
        ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
                                 dir, flags, slave_cfg);
        if (ret) {
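
The 2-stage parameters travel in the prep flags word, and the decode now happens before sprd_dma_fill_desc() so the interrupt type is available when the global registers are programmed. A small standalone sketch of that pack-and-decode scheme; the shift values below are assumptions for illustration, since only the 8-bit masks appear in the diff:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative layout: an 8-bit interrupt type, channel mode and trigger
 * mode packed into one flags word. The shifts are invented, not the
 * driver's real SPRD_DMA_*_SHIFT constants.
 */
#define INT_TYPE_MASK   0xffu
#define FIELD_MASK      0xffu
#define CHN_MODE_SHIFT  8
#define TRG_MODE_SHIFT  16

static unsigned long pack_flags(uint8_t int_type, uint8_t chn_mode,
                                uint8_t trg_mode)
{
        return (unsigned long)int_type |
               ((unsigned long)chn_mode << CHN_MODE_SHIFT) |
               ((unsigned long)trg_mode << TRG_MODE_SHIFT);
}

int main(void)
{
        unsigned long flags = pack_flags(2, 3, 1);

        /* Decode the same way the prep callback does: shift, then mask. */
        printf("int_type=%lu chn_mode=%lu trg_mode=%lu\n",
               flags & INT_TYPE_MASK,
               (flags >> CHN_MODE_SHIFT) & FIELD_MASK,
               (flags >> TRG_MODE_SHIFT) & FIELD_MASK);
        return 0;
}
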
drivers/dma/tegra210-adma.c
index d51550d..2805853 100644
 #define ADMA_CH_CONFIG_MAX_BUFS                                8
 
 #define ADMA_CH_FIFO_CTRL                              0x2c
-#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val)             (((val) & 0xf) << 24)
-#define ADMA_CH_FIFO_CTRL_STARV_THRES(val)             (((val) & 0xf) << 16)
-#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT           8
-#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT           0
+#define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val)      (((val) & 0xf) << 24)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(val)      (((val) & 0xf) << 16)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val)         (((val) & 0xf) << 8)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val)         ((val) & 0xf)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(val)      (((val) & 0x1f) << 24)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(val)      (((val) & 0x1f) << 16)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val)         (((val) & 0x1f) << 8)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val)         ((val) & 0x1f)
 
 #define ADMA_CH_LOWER_SRC_ADDR                         0x34
 #define ADMA_CH_LOWER_TRG_ADDR                         0x3c
 
 #define TEGRA_ADMA_BURST_COMPLETE_TIME                 20
 
-#define ADMA_CH_FIFO_CTRL_DEFAULT      (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
-                                        ADMA_CH_FIFO_CTRL_STARV_THRES(1))
+#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
+                                   TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
+                                   TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3)    | \
+                                   TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
+
+#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
+                                   TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
+                                   TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3)    | \
+                                   TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
 
 #define ADMA_CH_REG_FIELD_VAL(val, mask, shift)        (((val) & mask) << shift)
 
@@ -73,7 +84,8 @@ struct tegra_adma;
  * @global_int_clear: Register offset of DMA global interrupt clear.
  * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
  * @ch_req_rx_shift: Register offset for AHUB receive channel select.
- * @ch_base_offset: Reister offset of DMA channel registers.
+ * @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
  * @ch_req_mask: Mask for Tx or Rx channel select.
  * @ch_req_max: Maximum number of Tx or Rx channels available.
  * @ch_reg_size: Size of DMA channel register space.
@@ -86,6 +98,7 @@ struct tegra_adma_chip_data {
        unsigned int ch_req_tx_shift;
        unsigned int ch_req_rx_shift;
        unsigned int ch_base_offset;
+       unsigned int ch_fifo_ctrl;
        unsigned int ch_req_mask;
        unsigned int ch_req_max;
        unsigned int ch_reg_size;
@@ -589,7 +602,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
                         ADMA_CH_CTRL_FLOWCTRL_EN;
        ch_regs->config |= cdata->adma_get_burst_config(burst_size);
        ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
-       ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
+       ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
        ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
 
        return tegra_adma_request_alloc(tdc, direction);
@@ -773,6 +786,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
        .ch_req_tx_shift        = 28,
        .ch_req_rx_shift        = 24,
        .ch_base_offset         = 0,
+       .ch_fifo_ctrl           = TEGRA210_FIFO_CTRL_DEFAULT,
        .ch_req_mask            = 0xf,
        .ch_req_max             = 10,
        .ch_reg_size            = 0x80,
@@ -786,6 +800,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
        .ch_req_tx_shift        = 27,
        .ch_req_rx_shift        = 22,
        .ch_base_offset         = 0x10000,
+       .ch_fifo_ctrl           = TEGRA186_FIFO_CTRL_DEFAULT,
        .ch_req_mask            = 0x1f,
        .ch_req_max             = 20,
        .ch_reg_size            = 0x100,
@@ -834,16 +849,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
                return PTR_ERR(tdma->ahub_clk);
        }
 
-       pm_runtime_enable(&pdev->dev);
-
-       ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0)
-               goto rpm_disable;
-
-       ret = tegra_adma_init(tdma);
-       if (ret)
-               goto rpm_put;
-
        INIT_LIST_HEAD(&tdma->dma_dev.channels);
        for (i = 0; i < tdma->nr_channels; i++) {
                struct tegra_adma_chan *tdc = &tdma->channels[i];
@@ -862,6 +867,16 @@ static int tegra_adma_probe(struct platform_device *pdev)
                tdc->tdma = tdma;
        }
 
+       pm_runtime_enable(&pdev->dev);
+
+       ret = pm_runtime_get_sync(&pdev->dev);
+       if (ret < 0)
+               goto rpm_disable;
+
+       ret = tegra_adma_init(tdma);
+       if (ret)
+               goto rpm_put;
+
        dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
        dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
        dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
@@ -905,13 +920,13 @@ static int tegra_adma_probe(struct platform_device *pdev)
 
 dma_remove:
        dma_async_device_unregister(&tdma->dma_dev);
-irq_dispose:
-       while (--i >= 0)
-               irq_dispose_mapping(tdma->channels[i].irq);
 rpm_put:
        pm_runtime_put_sync(&pdev->dev);
 rpm_disable:
        pm_runtime_disable(&pdev->dev);
+irq_dispose:
+       while (--i >= 0)
+               irq_dispose_mapping(tdma->channels[i].irq);
 
        return ret;
 }
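
The probe fix above defers runtime PM and controller init until after the per-channel setup loop, and the error labels were reordered so teardown happens in the reverse order of what was actually set up. A generic sketch of that goto-unwind convention, with invented helpers standing in for the real steps:

#include <stdio.h>

static int setup_channels(void)     { puts("channels ready"); return 0; }
static int enable_pm(void)          { puts("pm enabled");     return 0; }
static int init_hw(void)            { puts("hw init failed"); return -1; } /* force a failure */
static void disable_pm(void)        { puts("pm disabled"); }
static void teardown_channels(void) { puts("channels torn down"); }

static int probe(void)
{
        int ret;

        ret = setup_channels();
        if (ret)
                return ret;

        ret = enable_pm();
        if (ret)
                goto err_channels;

        ret = init_hw();
        if (ret)
                goto err_pm;

        return 0;

        /* Unwind strictly in reverse order of acquisition. */
err_pm:
        disable_pm();
err_channels:
        teardown_channels();
        return ret;
}

int main(void)
{
        printf("probe returned %d\n", probe());
        return 0;
}
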