Merge branch 'topic/renesas' into for-linus
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 85820a2..a8b6225 100644
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include "dmaengine.h"
 #include "virt-dma.h"
 
-#define JZ_DMA_NR_CHANNELS     32
-
 /* Global registers. */
-#define JZ_DMA_REG_DMAC                0x1000
-#define JZ_DMA_REG_DIRQP       0x1004
-#define JZ_DMA_REG_DDR         0x1008
-#define JZ_DMA_REG_DDRS                0x100c
-#define JZ_DMA_REG_DMACP       0x101c
-#define JZ_DMA_REG_DSIRQP      0x1020
-#define JZ_DMA_REG_DSIRQM      0x1024
-#define JZ_DMA_REG_DCIRQP      0x1028
-#define JZ_DMA_REG_DCIRQM      0x102c
+#define JZ_DMA_REG_DMAC                0x00
+#define JZ_DMA_REG_DIRQP       0x04
+#define JZ_DMA_REG_DDR         0x08
+#define JZ_DMA_REG_DDRS                0x0c
+#define JZ_DMA_REG_DCKE                0x10
+#define JZ_DMA_REG_DCKES       0x14
+#define JZ_DMA_REG_DCKEC       0x18
+#define JZ_DMA_REG_DMACP       0x1c
+#define JZ_DMA_REG_DSIRQP      0x20
+#define JZ_DMA_REG_DSIRQM      0x24
+#define JZ_DMA_REG_DCIRQP      0x28
+#define JZ_DMA_REG_DCIRQM      0x2c
 
 /* Per-channel registers. */
 #define JZ_DMA_REG_CHAN(n)     (n * 0x20)
-#define JZ_DMA_REG_DSA(n)      (0x00 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTA(n)      (0x04 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTC(n)      (0x08 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DRT(n)      (0x0c + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCS(n)      (0x10 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCM(n)      (0x14 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DDA(n)      (0x18 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DSD(n)      (0x1c + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DSA         0x00
+#define JZ_DMA_REG_DTA         0x04
+#define JZ_DMA_REG_DTC         0x08
+#define JZ_DMA_REG_DRT         0x0c
+#define JZ_DMA_REG_DCS         0x10
+#define JZ_DMA_REG_DCM         0x14
+#define JZ_DMA_REG_DDA         0x18
+#define JZ_DMA_REG_DSD         0x1c
 
 #define JZ_DMA_DMAC_DMAE       BIT(0)
 #define JZ_DMA_DMAC_AR         BIT(2)
 #define JZ_DMA_DMAC_HLT                BIT(3)
+#define JZ_DMA_DMAC_FAIC       BIT(27)
 #define JZ_DMA_DMAC_FMSC       BIT(31)
 
 #define JZ_DMA_DRT_AUTO                0x8
                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
+#define JZ4780_DMA_CTRL_OFFSET 0x1000
+
+/* macros for use with jz4780_dma_soc_data.flags */
+#define JZ_SOC_DATA_ALLOW_LEGACY_DT    BIT(0)
+#define JZ_SOC_DATA_PROGRAMMABLE_DMA   BIT(1)
+#define JZ_SOC_DATA_PER_CHAN_PM                BIT(2)
+#define JZ_SOC_DATA_NO_DCKES_DCKEC     BIT(3)
+
 /**
  * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
  * @dcm: value for the DCM (channel command) register
  * @dtc: transfer count (number of blocks of the transfer size specified in DCM
  * to transfer) in the low 24 bits, offset of the next descriptor from the
  * descriptor base address in the upper 8 bits.
- * @sd: target/source stride difference (in stride transfer mode).
- * @drt: request type
  */
 struct jz4780_dma_hwdesc {
        uint32_t dcm;
        uint32_t dsa;
        uint32_t dta;
        uint32_t dtc;
-       uint32_t sd;
-       uint32_t drt;
-       uint32_t reserved[2];
 };
 
 /* Size of allocations for hardware descriptor blocks. */
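
Editor's illustrative sketch (not part of the patch): the kerneldoc above describes dtc as carrying the transfer count in its low 24 bits and the offset of the next descriptor in its upper 8 bits. A hypothetical helper packing such a word, using the kernel's GENMASK(), could look like this; it is only meant to make the field layout explicit.

static inline uint32_t jz4780_dma_pack_dtc(uint32_t count, uint32_t next_off)
{
	/* count: transfer count, low 24 bits; next_off: next-descriptor offset, upper 8 bits */
	return (count & GENMASK(23, 0)) | ((next_off & 0xff) << 24);
}
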
@@ -135,14 +141,22 @@ struct jz4780_dma_chan {
        unsigned int curr_hwdesc;
 };
 
+struct jz4780_dma_soc_data {
+       unsigned int nb_channels;
+       unsigned int transfer_ord_max;
+       unsigned long flags;
+};
+
 struct jz4780_dma_dev {
        struct dma_device dma_device;
-       void __iomem *base;
+       void __iomem *chn_base;
+       void __iomem *ctrl_base;
        struct clk *clk;
        unsigned int irq;
+       const struct jz4780_dma_soc_data *soc_data;
 
        uint32_t chan_reserved;
-       struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
+       struct jz4780_dma_chan chan[];
 };
 
 struct jz4780_dma_filter_data {
@@ -169,16 +183,51 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
                            dma_device);
 }
 
-static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
+static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
+       unsigned int chn, unsigned int reg)
+{
+       return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
+       unsigned int chn, unsigned int reg, uint32_t val)
+{
+       writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
        unsigned int reg)
 {
-       return readl(jzdma->base + reg);
+       return readl(jzdma->ctrl_base + reg);
 }
 
-static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
+static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
        unsigned int reg, uint32_t val)
 {
-       writel(val, jzdma->base + reg);
+       writel(val, jzdma->ctrl_base + reg);
+}
+
+static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
+       unsigned int chn)
+{
+       if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
+               unsigned int reg;
+
+               if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
+                       reg = JZ_DMA_REG_DCKE;
+               else
+                       reg = JZ_DMA_REG_DCKES;
+
+               jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
+       }
+}
+
+static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
+       unsigned int chn)
+{
+       if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
+                       !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
+               jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
 }
 
 static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
@@ -215,8 +264,10 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
        kfree(desc);
 }
 
-static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
+static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
+       unsigned long val, uint32_t *shift)
 {
+       struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        int ord = ffs(val) - 1;
 
        /*
@@ -228,8 +279,8 @@ static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
         */
        if (ord == 3)
                ord = 2;
-       else if (ord > 7)
-               ord = 7;
+       else if (ord > jzdma->soc_data->transfer_ord_max)
+               ord = jzdma->soc_data->transfer_ord_max;
 
        *shift = ord;
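
Editor's note, not part of the patch: ord here is the log2 of the transfer size in bytes (it is also returned through *shift and later used as count << transfer_shift when converting block counts to a byte residue), so the per-SoC transfer_ord_max values introduced further down in this diff - 5, 6 and 7 - presumably correspond to maximum transfer sizes of 32, 64 and 128 bytes. For example, a transfer whose addresses and length are all multiples of 256 yields ord = 8 and is simply clamped to the SoC maximum rather than rejected.
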
 
@@ -262,7 +313,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
                desc->dcm = JZ_DMA_DCM_SAI;
                desc->dsa = addr;
                desc->dta = config->dst_addr;
-               desc->drt = jzchan->transfer_type;
 
                width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
@@ -270,7 +320,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
                desc->dcm = JZ_DMA_DCM_DAI;
                desc->dsa = config->src_addr;
                desc->dta = addr;
-               desc->drt = jzchan->transfer_type;
 
                width = config->src_addr_width;
                maxburst = config->src_maxburst;
@@ -283,7 +332,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
         * divisible by the transfer size, and we must not use more than the
         * maximum burst specified by the user.
         */
-       tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+       tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
                                       &jzchan->transfer_shift);
 
        switch (width) {
@@ -412,12 +461,13 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
        if (!desc)
                return NULL;
 
-       tsz = jz4780_dma_transfer_size(dest | src | len,
+       tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
                                       &jzchan->transfer_shift);
 
+       jzchan->transfer_type = JZ_DMA_DRT_AUTO;
+
        desc->desc[0].dsa = src;
        desc->desc[0].dta = dest;
-       desc->desc[0].drt = JZ_DMA_DRT_AUTO;
        desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
                            tsz << JZ_DMA_DCM_TSZ_SHIFT |
                            JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
@@ -472,18 +522,34 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
                        (jzchan->curr_hwdesc + 1) % jzchan->desc->count;
        }
 
-       /* Use 8-word descriptors. */
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);
+       /* Enable the channel's clock. */
+       jz4780_dma_chan_enable(jzdma, jzchan->id);
+
+       /* Use 4-word descriptors. */
+       jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
+
+       /* Set transfer type. */
+       jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
+                             jzchan->transfer_type);
+
+       /*
+        * Set the transfer count. This is redundant for a descriptor-driven
+        * transfer. However, there can be a delay between the transfer start
+        * time and when DTCn reg contains the new transfer count. Setting
+        * it explicitly ensures residue is computed correctly at all times.
+        */
+       jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
+                               jzchan->desc->desc[jzchan->curr_hwdesc].dtc);
 
        /* Write descriptor address and initiate descriptor fetch. */
        desc_phys = jzchan->desc->desc_phys +
                    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
+       jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
+       jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
 
        /* Enable the channel. */
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
-                         JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
+       jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
+                             JZ_DMA_DCS_CTE);
 }
 
 static void jz4780_dma_issue_pending(struct dma_chan *chan)
@@ -509,12 +575,14 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
        spin_lock_irqsave(&jzchan->vchan.lock, flags);
 
        /* Clear the DMA status and stop the transfer. */
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+       jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
        if (jzchan->desc) {
                vchan_terminate_vdesc(&jzchan->desc->vdesc);
                jzchan->desc = NULL;
        }
 
+       jz4780_dma_chan_disable(jzdma, jzchan->id);
+
        vchan_get_all_descriptors(&jzchan->vchan, &head);
 
        spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
@@ -526,8 +594,10 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
 static void jz4780_dma_synchronize(struct dma_chan *chan)
 {
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+       struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
 
        vchan_synchronize(&jzchan->vchan);
+       jz4780_dma_chan_disable(jzdma, jzchan->id);
 }
 
 static int jz4780_dma_config(struct dma_chan *chan,
@@ -549,21 +619,17 @@ static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
        struct jz4780_dma_desc *desc, unsigned int next_sg)
 {
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
-       unsigned int residue, count;
+       unsigned int count = 0;
        unsigned int i;
 
-       residue = 0;
-
        for (i = next_sg; i < desc->count; i++)
-               residue += desc->desc[i].dtc << jzchan->transfer_shift;
+               count += desc->desc[i].dtc & GENMASK(23, 0);
 
-       if (next_sg != 0) {
-               count = jz4780_dma_readl(jzdma,
-                                        JZ_DMA_REG_DTC(jzchan->id));
-               residue += count << jzchan->transfer_shift;
-       }
+       if (next_sg != 0)
+               count += jz4780_dma_chn_readl(jzdma, jzchan->id,
+                                        JZ_DMA_REG_DTC);
 
-       return residue;
+       return count << jzchan->transfer_shift;
 }
 
 static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
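
A worked example from the editor (not part of the patch): with the reworked jz4780_dma_desc_residue() above, each pending descriptor's dtc is first masked to its low 24 bits (the upper 8 bits hold the next-descriptor offset, per the hwdesc kerneldoc), the counts are summed, and only the final sum is shifted into bytes. With transfer_shift = 2 (4-byte transfers) and two unprocessed descriptors holding counts of 0x100 and 0x80, the reported residue would be (0x100 + 0x80) << 2 = 0x600 = 1536 bytes.
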
@@ -573,6 +639,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;
+       unsigned long residue = 0;
 
        status = dma_cookie_status(chan, cookie, txstate);
        if ((status == DMA_COMPLETE) || (txstate == NULL))
@@ -583,13 +650,13 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
        vdesc = vchan_find_desc(&jzchan->vchan, cookie);
        if (vdesc) {
                /* On the issued list, so hasn't been processed yet */
-               txstate->residue = jz4780_dma_desc_residue(jzchan,
+               residue = jz4780_dma_desc_residue(jzchan,
                                        to_jz4780_dma_desc(vdesc), 0);
        } else if (cookie == jzchan->desc->vdesc.tx.cookie) {
-               txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
-                         (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
-       } else
-               txstate->residue = 0;
+               residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
+                                       jzchan->curr_hwdesc + 1);
+       }
+       dma_set_residue(txstate, residue);
 
        if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
            && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
@@ -606,8 +673,8 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
 
        spin_lock(&jzchan->vchan.lock);
 
-       dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+       dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
+       jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
 
        if (dcs & JZ_DMA_DCS_AR) {
                dev_warn(&jzchan->vchan.chan.dev->device,
@@ -646,9 +713,9 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
        uint32_t pending, dmac;
        int i;
 
-       pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);
+       pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
 
-       for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+       for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
                if (!(pending & (1<<i)))
                        continue;
 
@@ -656,12 +723,12 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
        }
 
        /* Clear halt and address error status of all channels. */
-       dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
+       dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
        dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
+       jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
 
        /* Clear interrupt pending status. */
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+       jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
 
        return IRQ_HANDLED;
 }
@@ -728,7 +795,7 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
        data.channel = dma_spec->args[1];
 
        if (data.channel > -1) {
-               if (data.channel >= JZ_DMA_NR_CHANNELS) {
+               if (data.channel >= jzdma->soc_data->nb_channels) {
                        dev_err(jzdma->dma_device.dev,
                                "device requested non-existent channel %u\n",
                                data.channel);
@@ -755,16 +822,29 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
 static int jz4780_dma_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
+       const struct jz4780_dma_soc_data *soc_data;
        struct jz4780_dma_dev *jzdma;
        struct jz4780_dma_chan *jzchan;
        struct dma_device *dd;
        struct resource *res;
        int i, ret;
 
-       jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
+       if (!dev->of_node) {
+               dev_err(dev, "This driver must be probed from devicetree\n");
+               return -EINVAL;
+       }
+
+       soc_data = device_get_match_data(dev);
+       if (!soc_data)
+               return -EINVAL;
+
+       jzdma = devm_kzalloc(dev, sizeof(*jzdma)
+                               + sizeof(*jzdma->chan) * soc_data->nb_channels,
+                               GFP_KERNEL);
        if (!jzdma)
                return -ENOMEM;
 
+       jzdma->soc_data = soc_data;
        platform_set_drvdata(pdev, jzdma);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -773,9 +853,26 @@ static int jz4780_dma_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       jzdma->base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(jzdma->base))
-               return PTR_ERR(jzdma->base);
+       jzdma->chn_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(jzdma->chn_base))
+               return PTR_ERR(jzdma->chn_base);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (res) {
+               jzdma->ctrl_base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(jzdma->ctrl_base))
+                       return PTR_ERR(jzdma->ctrl_base);
+       } else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
+               /*
+                * On JZ4780, if the second memory resource was not supplied,
+                * assume we're using an old devicetree, and calculate the
+                * offset to the control registers.
+                */
+               jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
+       } else {
+               dev_err(dev, "failed to get I/O memory\n");
+               return -EINVAL;
+       }
 
        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
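
Editor's note, not part of the patch: with the legacy-devicetree fallback above, a JZ4780 DT that supplies only one memory resource ends up with ctrl_base = chn_base + 0x1000, which matches the layout implied by the removed defines near the top of this diff, where the global registers sat at offsets 0x1000 and up within the single mapping.
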
@@ -833,13 +930,15 @@ static int jz4780_dma_probe(struct platform_device *pdev)
         * Also set the FMSC bit - it increases MSC performance, so it makes
         * little sense not to enable it.
         */
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
-                         JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
-       jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);
+       jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
+                              JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);
+
+       if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
+               jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);
 
        INIT_LIST_HEAD(&dd->channels);
 
-       for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+       for (i = 0; i < soc_data->nb_channels; i++) {
                jzchan = &jzdma->chan[i];
                jzchan->id = i;
 
@@ -847,7 +946,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
                jzchan->vchan.desc_free = jz4780_dma_desc_free;
        }
 
-       ret = dma_async_device_register(dd);
+       ret = dmaenginem_async_device_register(dd);
        if (ret) {
                dev_err(dev, "failed to register device\n");
                goto err_disable_clk;
@@ -858,15 +957,12 @@ static int jz4780_dma_probe(struct platform_device *pdev)
                                         jzdma);
        if (ret) {
                dev_err(dev, "failed to register OF DMA controller\n");
-               goto err_unregister_dev;
+               goto err_disable_clk;
        }
 
        dev_info(dev, "JZ4780 DMA controller initialised\n");
        return 0;
 
-err_unregister_dev:
-       dma_async_device_unregister(dd);
-
 err_disable_clk:
        clk_disable_unprepare(jzdma->clk);
 
@@ -884,15 +980,40 @@ static int jz4780_dma_remove(struct platform_device *pdev)
 
        free_irq(jzdma->irq, jzdma);
 
-       for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
+       for (i = 0; i < jzdma->soc_data->nb_channels; i++)
                tasklet_kill(&jzdma->chan[i].vchan.task);
 
-       dma_async_device_unregister(&jzdma->dma_device);
        return 0;
 }
 
+static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
+       .nb_channels = 6,
+       .transfer_ord_max = 5,
+};
+
+static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
+       .nb_channels = 6,
+       .transfer_ord_max = 5,
+       .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
+       .nb_channels = 6,
+       .transfer_ord_max = 6,
+       .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
+       .nb_channels = 32,
+       .transfer_ord_max = 7,
+       .flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
 static const struct of_device_id jz4780_dma_dt_match[] = {
-       { .compatible = "ingenic,jz4780-dma", .data = NULL },
+       { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
+       { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
+       { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
+       { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
        {},
 };
 MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
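
To illustrate the per-SoC parameterization this patch introduces, here is an editor's hypothetical sketch (not from the patch; the SoC name, channel count, transfer order and flags are made up): supporting another Ingenic SoC would only require a jz4780_dma_soc_data entry and a matching compatible string in jz4780_dma_dt_match[].

/* Hypothetical example only - not part of the patch. */
static const struct jz4780_dma_soc_data jzXXXX_dma_soc_data = {
	.nb_channels = 8,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

/* ...and the corresponding entry in jz4780_dma_dt_match[]: */
	{ .compatible = "ingenic,jzXXXX-dma", .data = &jzXXXX_dma_soc_data },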