Merge tag 'dmaengine-4.11-rc1' of git://git.infradead.org/users/vkoul/slave-dma
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 22 Feb 2017 01:06:22 +0000 (17:06 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 22 Feb 2017 01:06:22 +0000 (17:06 -0800)
Pull dmaengine updates from Vinod Koul:
 "This time we fairly boring and bit small update.

   - Support for Intel iDMA 32-bit hardware
   - Deprecate broken support for channel switching in async_tx
   - A bunch of updates to stm32-dma
   - Cyclic support for zx dma and turning it into a generic zx dma driver
   - Small updates to a bunch of other drivers"

* tag 'dmaengine-4.11-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (29 commits)
  async_tx: deprecate broken support for channel switching
  dmaengine: rcar-dmac: Widen DMA mask to 40 bits
  dmaengine: sun6i: allow build on ARM64 platforms (sun50i)
  dmaengine: Provide a wrapper for memcpy operations
  dmaengine: zx: fix build warning
  dmaengine: dw: we do support Merrifield SoC in PCI mode
  dmaengine: dw: add support of iDMA 32-bit hardware
  dmaengine: dw: introduce register mappings for iDMA 32-bit
  dmaengine: dw: introduce block2bytes() and bytes2block()
  dmaengine: dw: extract dwc_chan_pause() for future use
  dmaengine: dw: replace convert_burst() with one liner
  dmaengine: dw: register IRQ and DMA pool with instance ID
  dmaengine: dw: Fix data corruption in large device to memory transfers
  dmaengine: ste_dma40: indicate granularity on channels
  dmaengine: ste_dma40: indicate directions on channels
  dmaengine: stm32-dma: Add error messages if xlate fails
  dmaengine: dw: pci: remove LPE Audio DMA ID
  dmaengine: stm32-dma: Add max_burst support
  dmaengine: stm32-dma: Add synchronization support
  dmaengine: stm32-dma: Fix residue computation issue in cyclic mode
  ...
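
The shortlog entry "dmaengine: Provide a wrapper for memcpy operations" adds a
dmaengine_prep_dma_memcpy() helper to include/linux/dmaengine.h. A minimal
client-side sketch of how such a helper is typically consumed, assuming the
usual dmaengine prepare/submit/issue flow; the channel is requested elsewhere
and completion handling is trimmed:

/*
 * Hedged illustration only, not taken from any driver in this pull:
 * prepare a memcpy descriptor on an already-requested channel and
 * kick the engine.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_dma_copy(struct dma_chan *chan, dma_addr_t dst,
			    dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* New wrapper; hides the device_prep_dma_memcpy() indirection. */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}

Like the other dmaengine_prep_*() helpers, the wrapper is expected to return
NULL when the channel's device does not implement the memcpy capability.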

20 files changed:
Documentation/ABI/testing/sysfs-platform-hidma
Documentation/ABI/testing/sysfs-platform-hidma-mgmt
Documentation/devicetree/bindings/dma/stm32-dma.txt
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/dmaengine.c
drivers/dma/dw/core.c
drivers/dma/dw/pci.c
drivers/dma/dw/platform.c
drivers/dma/dw/regs.h
drivers/dma/ipu/ipu_irq.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/ste_dma40.c
drivers/dma/stm32-dma.c
drivers/dma/zx296702_dma.c [deleted file]
drivers/dma/zx_dma.c [new file with mode: 0644]
include/linux/async_tx.h
include/linux/dma/dw.h
include/linux/dmaengine.h
include/linux/platform_data/dma-dw.h

diff --git a/Documentation/ABI/testing/sysfs-platform-hidma b/Documentation/ABI/testing/sysfs-platform-hidma
index d364415..fca40a5 100644 (file)
@@ -2,7 +2,7 @@ What:           /sys/devices/platform/hidma-*/chid
                /sys/devices/platform/QCOM8061:*/chid
 Date:          Dec 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Contains the ID of the channel within the HIDMA instance.
                It is used to associate a given HIDMA channel with the
diff --git a/Documentation/ABI/testing/sysfs-platform-hidma-mgmt b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt
index c2fb5d0..3b6c5c9 100644 (file)
@@ -2,7 +2,7 @@ What:           /sys/devices/platform/hidma-mgmt*/chanops/chan*/priority
                /sys/devices/platform/QCOM8060:*/chanops/chan*/priority
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Contains either 0 or 1 and indicates if the DMA channel is a
                low priority (0) or high priority (1) channel.
@@ -11,7 +11,7 @@ What:         /sys/devices/platform/hidma-mgmt*/chanops/chan*/weight
                /sys/devices/platform/QCOM8060:*/chanops/chan*/weight
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Contains 0..15 and indicates the weight of the channel among
                equal priority channels during round robin scheduling.
@@ -20,7 +20,7 @@ What:         /sys/devices/platform/hidma-mgmt*/chreset_timeout_cycles
                /sys/devices/platform/QCOM8060:*/chreset_timeout_cycles
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Contains the platform specific cycle value to wait after a
                reset command is issued. If the value is chosen too short,
@@ -32,7 +32,7 @@ What:         /sys/devices/platform/hidma-mgmt*/dma_channels
                /sys/devices/platform/QCOM8060:*/dma_channels
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Contains the number of dma channels supported by one instance
                of HIDMA hardware. The value may change from chip to chip.
@@ -41,7 +41,7 @@ What:         /sys/devices/platform/hidma-mgmt*/hw_version_major
                /sys/devices/platform/QCOM8060:*/hw_version_major
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Version number major for the hardware.
 
@@ -49,7 +49,7 @@ What:         /sys/devices/platform/hidma-mgmt*/hw_version_minor
                /sys/devices/platform/QCOM8060:*/hw_version_minor
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Version number minor for the hardware.
 
@@ -57,7 +57,7 @@ What:         /sys/devices/platform/hidma-mgmt*/max_rd_xactions
                /sys/devices/platform/QCOM8060:*/max_rd_xactions
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Contains a value between 0 and 31. Maximum number of
                read transactions that can be issued back to back.
@@ -69,7 +69,7 @@ What:         /sys/devices/platform/hidma-mgmt*/max_read_request
                /sys/devices/platform/QCOM8060:*/max_read_request
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Size of each read request. The value needs to be a power
                of two and can be between 128 and 1024.
@@ -78,7 +78,7 @@ What:         /sys/devices/platform/hidma-mgmt*/max_wr_xactions
                /sys/devices/platform/QCOM8060:*/max_wr_xactions
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Contains a value between 0 and 31. Maximum number of
                write transactions that can be issued back to back.
@@ -91,7 +91,7 @@ What:         /sys/devices/platform/hidma-mgmt*/max_write_request
                /sys/devices/platform/QCOM8060:*/max_write_request
 Date:          Nov 2015
 KernelVersion: 4.4
-Contact:       "Sinan Kaya <okaya@cudeaurora.org>"
+Contact:       "Sinan Kaya <okaya@codeaurora.org>"
 Description:
                Size of each write request. The value needs to be a power
                of two and can be between 128 and 1024.
diff --git a/Documentation/devicetree/bindings/dma/stm32-dma.txt b/Documentation/devicetree/bindings/dma/stm32-dma.txt
index 70cd13f..4408af6 100644 (file)
@@ -40,8 +40,7 @@ Example:
 
 DMA clients connected to the STM32 DMA controller must use the format
 described in the dma.txt file, using a five-cell specifier for each
-channel: a phandle plus four integer cells.
-The four cells in order are:
+channel: a phandle to the DMA controller plus the following four integer cells:
 
 1. The channel id
 2. The request line number
@@ -61,7 +60,7 @@ The four cells in order are:
        0x1: medium
        0x2: high
        0x3: very high
-5. A 32bit mask specifying the DMA FIFO threshold configuration which are device
+4. A 32bit mask specifying the DMA FIFO threshold configuration which are device
    dependent:
  -bit 0-1: Fifo threshold
        0x0: 1/4 full FIFO
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 263495d..d01d598 100644 (file)
@@ -157,7 +157,7 @@ config DMA_SUN4I
 
 config DMA_SUN6I
        tristate "Allwinner A31 SoCs DMA support"
-       depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
+       depends on MACH_SUN6I || MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
        depends on RESET_CONTROLLER
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
@@ -458,7 +458,7 @@ config STM32_DMA
        help
          Enable support for the on-chip DMA controller on STMicroelectronics
          STM32 MCUs.
-         If you have a board based on such a MCU and wish to use DMA say Y or M
+         If you have a board based on such a MCU and wish to use DMA say Y
          here.
 
 config S3C24XX_DMAC
@@ -571,12 +571,12 @@ config XILINX_ZYNQMP_DMA
          Enable support for Xilinx ZynqMP DMA controller.
 
 config ZX_DMA
-       tristate "ZTE ZX296702 DMA support"
+       tristate "ZTE ZX DMA support"
        depends on ARCH_ZX || COMPILE_TEST
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
        help
-         Support the DMA engine for ZTE ZX296702 platform devices.
+         Support the DMA engine for ZTE ZX family platform devices.
 
 
 # driver files
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a4fa336..0b723e9 100644 (file)
@@ -66,7 +66,7 @@ obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
 obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
-obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
 
 obj-y += qcom/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6b53526..24e0221 100644 (file)
@@ -65,7 +65,7 @@
 #include <linux/mempool.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
-static DEFINE_IDR(dma_idr);
+static DEFINE_IDA(dma_ida);
 static LIST_HEAD(dma_device_list);
 static long dmaengine_ref_count;
 
@@ -162,7 +162,7 @@ static void chan_dev_release(struct device *dev)
        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
-               idr_remove(&dma_idr, chan_dev->dev_id);
+               ida_remove(&dma_ida, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
@@ -898,14 +898,15 @@ static int get_dma_id(struct dma_device *device)
 {
        int rc;
 
-       mutex_lock(&dma_list_mutex);
-
-       rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
-       if (rc >= 0)
-               device->dev_id = rc;
+       do {
+               if (!ida_pre_get(&dma_ida, GFP_KERNEL))
+                       return -ENOMEM;
+               mutex_lock(&dma_list_mutex);
+               rc = ida_get_new(&dma_ida, &device->dev_id);
+               mutex_unlock(&dma_list_mutex);
+       } while (rc == -EAGAIN);
 
-       mutex_unlock(&dma_list_mutex);
-       return rc < 0 ? rc : 0;
+       return rc;
 }
 
 /**
@@ -1035,7 +1036,7 @@ err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
                mutex_lock(&dma_list_mutex);
-               idr_remove(&dma_idr, device->dev_id);
+               ida_remove(&dma_ida, device->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(idr_ref);
                return rc;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e5adf5d..e500950 100644 (file)
@@ -138,16 +138,32 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
        dwc->descs_allocated--;
 }
 
-static void dwc_initialize(struct dw_dma_chan *dwc)
+static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc)
+{
+       u32 cfghi = 0;
+       u32 cfglo = 0;
+
+       /* Set default burst alignment */
+       cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
+
+       /* Low 4 bits of the request lines */
+       cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
+       cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
+
+       /* Request line extension (2 bits) */
+       cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
+       cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
+
+       channel_writel(dwc, CFG_LO, cfglo);
+       channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc)
 {
-       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
        u32 cfghi = DWC_CFGH_FIFO_MODE;
        u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
        bool hs_polarity = dwc->dws.hs_polarity;
 
-       if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
-               return;
-
        cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
        cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
 
@@ -156,6 +172,19 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
        channel_writel(dwc, CFG_LO, cfglo);
        channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+       if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
+               return;
+
+       if (dw->pdata->is_idma32)
+               dwc_initialize_chan_idma32(dwc);
+       else
+               dwc_initialize_chan_dw(dwc);
 
        /* Enable interrupts */
        channel_set_bit(dw, MASK.XFER, dwc->mask);
@@ -184,6 +213,37 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
                cpu_relax();
 }
 
+static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes,
+                         unsigned int width, size_t *len)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+       u32 block;
+
+       /* Always in bytes for iDMA 32-bit */
+       if (dw->pdata->is_idma32)
+               width = 0;
+
+       if ((bytes >> width) > dwc->block_size) {
+               block = dwc->block_size;
+               *len = block << width;
+       } else {
+               block = bytes >> width;
+               *len = bytes;
+       }
+
+       return block;
+}
+
+static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+       if (dw->pdata->is_idma32)
+               return IDMA32C_CTLH_BLOCK_TS(block);
+
+       return DWC_CTLH_BLOCK_TS(block) << width;
+}
+
 /*----------------------------------------------------------------------*/
 
 /* Perform single block transfer */
@@ -332,7 +392,7 @@ static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
        u32 ctlhi = channel_readl(dwc, CTL_HI);
        u32 ctllo = channel_readl(dwc, CTL_LO);
 
-       return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+       return block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
 }
 
 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -692,10 +752,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                        | DWC_CTLL_FC_M2M;
        prev = first = NULL;
 
-       for (offset = 0; offset < len; offset += xfer_count << src_width) {
-               xfer_count = min_t(size_t, (len - offset) >> src_width,
-                                          dwc->block_size);
-
+       for (offset = 0; offset < len; offset += xfer_count) {
                desc = dwc_desc_get(dwc);
                if (!desc)
                        goto err_desc_get;
@@ -703,8 +760,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                lli_write(desc, sar, src + offset);
                lli_write(desc, dar, dest + offset);
                lli_write(desc, ctllo, ctllo);
-               lli_write(desc, ctlhi, xfer_count);
-               desc->len = xfer_count << src_width;
+               lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count));
+               desc->len = xfer_count;
 
                if (!first) {
                        first = desc;
@@ -775,7 +832,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
-                       u32             len, dlen, mem;
+                       u32             len, mem;
+                       size_t          dlen;
 
                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
@@ -789,17 +847,8 @@ slave_sg_todev_fill_desc:
 
                        lli_write(desc, sar, mem);
                        lli_write(desc, dar, reg);
+                       lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen));
                        lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
-                       if ((len >> mem_width) > dwc->block_size) {
-                               dlen = dwc->block_size << mem_width;
-                               mem += dlen;
-                               len -= dlen;
-                       } else {
-                               dlen = len;
-                               len = 0;
-                       }
-
-                       lli_write(desc, ctlhi, dlen >> mem_width);
                        desc->len = dlen;
 
                        if (!first) {
@@ -809,6 +858,9 @@ slave_sg_todev_fill_desc:
                                list_add_tail(&desc->desc_node, &first->tx_list);
                        }
                        prev = desc;
+
+                       mem += dlen;
+                       len -= dlen;
                        total_len += dlen;
 
                        if (len)
@@ -828,13 +880,12 @@ slave_sg_todev_fill_desc:
 
                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
-                       u32             len, dlen, mem;
+                       u32             len, mem;
+                       size_t          dlen;
 
                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);
 
-                       mem_width = __ffs(data_width | mem | len);
-
 slave_sg_fromdev_fill_desc:
                        desc = dwc_desc_get(dwc);
                        if (!desc)
@@ -842,16 +893,9 @@ slave_sg_fromdev_fill_desc:
 
                        lli_write(desc, sar, reg);
                        lli_write(desc, dar, mem);
+                       lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen));
+                       mem_width = __ffs(data_width | mem | dlen);
                        lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
-                       if ((len >> reg_width) > dwc->block_size) {
-                               dlen = dwc->block_size << reg_width;
-                               mem += dlen;
-                               len -= dlen;
-                       } else {
-                               dlen = len;
-                               len = 0;
-                       }
-                       lli_write(desc, ctlhi, dlen >> reg_width);
                        desc->len = dlen;
 
                        if (!first) {
@@ -861,6 +905,9 @@ slave_sg_fromdev_fill_desc:
                                list_add_tail(&desc->desc_node, &first->tx_list);
                        }
                        prev = desc;
+
+                       mem += dlen;
+                       len -= dlen;
                        total_len += dlen;
 
                        if (len)
@@ -903,25 +950,20 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
 }
 EXPORT_SYMBOL_GPL(dw_dma_filter);
 
-/*
- * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
- * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
- *
- * NOTE: burst size 2 is not supported by controller.
- *
- * This can be done by finding least significant bit set: n & (n - 1)
- */
-static inline void convert_burst(u32 *maxburst)
-{
-       if (*maxburst > 1)
-               *maxburst = fls(*maxburst) - 2;
-       else
-               *maxburst = 0;
-}
-
 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+       struct dma_slave_config *sc = &dwc->dma_sconfig;
+       struct dw_dma *dw = to_dw_dma(chan->device);
+       /*
+        * Fix sconfig's burst size according to dw_dmac. We need to convert
+        * them as:
+        * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+        *
+        * NOTE: burst size 2 is not supported by DesignWare controller.
+        *       iDMA 32-bit supports it.
+        */
+       u32 s = dw->pdata->is_idma32 ? 1 : 2;
 
        /* Check if chan will be configured for slave transfers */
        if (!is_slave_direction(sconfig->direction))
@@ -930,28 +972,39 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
        memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
        dwc->direction = sconfig->direction;
 
-       convert_burst(&dwc->dma_sconfig.src_maxburst);
-       convert_burst(&dwc->dma_sconfig.dst_maxburst);
+       sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
+       sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
 
        return 0;
 }
 
-static int dwc_pause(struct dma_chan *chan)
+static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
 {
-       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
-       unsigned long           flags;
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
        unsigned int            count = 20;     /* timeout iterations */
        u32                     cfglo;
 
-       spin_lock_irqsave(&dwc->lock, flags);
-
        cfglo = channel_readl(dwc, CFG_LO);
+       if (dw->pdata->is_idma32) {
+               if (drain)
+                       cfglo |= IDMA32C_CFGL_CH_DRAIN;
+               else
+                       cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
+       }
        channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
        while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
                udelay(2);
 
        set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
+}
 
+static int dwc_pause(struct dma_chan *chan)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       unsigned long           flags;
+
+       spin_lock_irqsave(&dwc->lock, flags);
+       dwc_chan_pause(dwc, false);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        return 0;
@@ -993,6 +1046,8 @@ static int dwc_terminate_all(struct dma_chan *chan)
 
        clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
 
+       dwc_chan_pause(dwc, true);
+
        dwc_chan_disable(dw, dwc);
 
        dwc_chan_resume(dwc);
@@ -1085,6 +1140,32 @@ static void dwc_issue_pending(struct dma_chan *chan)
 
 /*----------------------------------------------------------------------*/
 
+/*
+ * Program FIFO size of channels.
+ *
+ * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
+ * slice FIFO on equal parts between channels.
+ */
+static void idma32_fifo_partition(struct dw_dma *dw)
+{
+       u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
+                   IDMA32C_FP_UPDATE;
+       u64 fifo_partition = 0;
+
+       if (!dw->pdata->is_idma32)
+               return;
+
+       /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
+       fifo_partition |= value << 0;
+
+       /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
+       fifo_partition |= value << 32;
+
+       /* Program FIFO Partition registers - 128 bytes for each channel */
+       idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
+       idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
+}
+
 static void dw_dma_off(struct dw_dma *dw)
 {
        unsigned int i;
@@ -1504,8 +1585,16 @@ int dw_dma_probe(struct dw_dma_chip *chip)
        /* Force dma off, just in case */
        dw_dma_off(dw);
 
+       idma32_fifo_partition(dw);
+
+       /* Device and instance ID for IRQ and DMA pool */
+       if (pdata->is_idma32)
+               snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id);
+       else
+               snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id);
+
        /* Create a pool of consistent memory blocks for hardware descriptors */
-       dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
+       dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
                                         sizeof(struct dw_desc), 4, 0);
        if (!dw->desc_pool) {
                dev_err(chip->dev, "No memory for descriptors dma pool\n");
@@ -1516,7 +1605,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
        tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
        err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
-                         "dw_dmac", dw);
+                         dw->name, dw);
        if (err)
                goto err_pdata;
 
@@ -1665,6 +1754,8 @@ int dw_dma_enable(struct dw_dma_chip *chip)
 {
        struct dw_dma *dw = chip->dw;
 
+       idma32_fifo_partition(dw);
+
        dw_dma_on(dw);
        return 0;
 }
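
The dwc_config() hunk above folds the old convert_burst() helper into a
one-liner: enc = maxburst > 1 ? fls(maxburst) - s : 0, with s = 2 on a plain
DesignWare controller and s = 1 on iDMA 32-bit, which is why a burst of 2
becomes representable there. A standalone illustration of what that expression
computes, using a local stand-in for the kernel's fls():

/* Illustration of the burst encoding only; values mirror the hunk above. */
#include <stdio.h>

static int fls_u32(unsigned int x)	/* local stand-in for fls() */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned int encode_burst(unsigned int maxburst, int is_idma32)
{
	int s = is_idma32 ? 1 : 2;

	return maxburst > 1 ? fls_u32(maxburst) - s : 0;
}

int main(void)
{
	/* DesignWare keeps the old table: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. */
	printf("dw:     16 -> %u\n", encode_burst(16, 0));	/* 3 */
	/* iDMA 32-bit additionally encodes a burst of 2. */
	printf("idma32:  2 -> %u\n", encode_burst(2, 1));	/* 1 */
	return 0;
}
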
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 0ae6c3b..7778ed7 100644 (file)
 
 #include "internal.h"
 
+static struct dw_dma_platform_data mrfld_pdata = {
+       .nr_channels = 8,
+       .is_private = true,
+       .is_memcpy = true,
+       .is_idma32 = true,
+       .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+       .chan_priority = CHAN_PRIORITY_ASCENDING,
+       .block_size = 131071,
+       .nr_masters = 1,
+       .data_width = {4},
+};
+
 static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
        const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
@@ -47,6 +59,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
                return -ENOMEM;
 
        chip->dev = &pdev->dev;
+       chip->id = pdev->devfn;
        chip->regs = pcim_iomap_table(pdev)[0];
        chip->irq = pdev->irq;
        chip->pdata = pdata;
@@ -95,14 +108,16 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
 };
 
 static const struct pci_device_id dw_pci_id_table[] = {
-       /* Medfield */
+       /* Medfield (GPDMA) */
        { PCI_VDEVICE(INTEL, 0x0827) },
-       { PCI_VDEVICE(INTEL, 0x0830) },
 
        /* BayTrail */
        { PCI_VDEVICE(INTEL, 0x0f06) },
        { PCI_VDEVICE(INTEL, 0x0f40) },
 
+       /* Merrifield iDMA 32-bit (GPDMA) */
+       { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&mrfld_pdata },
+
        /* Braswell */
        { PCI_VDEVICE(INTEL, 0x2286) },
        { PCI_VDEVICE(INTEL, 0x22c0) },
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index b1655e4..c639c60 100644 (file)
@@ -202,6 +202,7 @@ static int dw_probe(struct platform_device *pdev)
                pdata = dw_dma_parse_dt(pdev);
 
        chip->dev = dev;
+       chip->id = pdev->id;
        chip->pdata = pdata;
 
        chip->clk = devm_clk_get(chip->dev, "hclk");
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 4e0128c..32a3287 100644 (file)
@@ -3,15 +3,19 @@
  *
  * Copyright (C) 2005-2007 Atmel Corporation
  * Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2016 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 
+#include <linux/bitops.h>
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 
+#include <linux/io-64-nonatomic-hi-lo.h>
+
 #include "internal.h"
 
 #define DW_DMA_MAX_NR_REQUESTS 16
@@ -85,9 +89,9 @@ struct dw_dma_regs {
        DW_REG(ID);
        DW_REG(TEST);
 
-       /* reserved */
-       DW_REG(__reserved0);
-       DW_REG(__reserved1);
+       /* iDMA 32-bit support */
+       DW_REG(CLASS_PRIORITY0);
+       DW_REG(CLASS_PRIORITY1);
 
        /* optional encoded params, 0x3c8..0x3f7 */
        u32     __reserved;
@@ -99,6 +103,17 @@ struct dw_dma_regs {
 
        /* top-level parameters */
        u32     DW_PARAMS;
+
+       /* component ID */
+       u32     COMP_TYPE;
+       u32     COMP_VERSION;
+
+       /* iDMA 32-bit support */
+       DW_REG(FIFO_PARTITION0);
+       DW_REG(FIFO_PARTITION1);
+
+       DW_REG(SAI_ERR);
+       DW_REG(GLOBAL_CFG);
 };
 
 /*
@@ -170,8 +185,9 @@ enum dw_dma_msize {
 #define DWC_CTLL_LLP_S_EN      (1 << 28)       /* src block chain */
 
 /* Bitfields in CTL_HI */
-#define DWC_CTLH_DONE          0x00001000
-#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
+#define DWC_CTLH_BLOCK_TS_MASK GENMASK(11, 0)
+#define DWC_CTLH_BLOCK_TS(x)   ((x) & DWC_CTLH_BLOCK_TS_MASK)
+#define DWC_CTLH_DONE          (1 << 12)
 
 /* Bitfields in CFG_LO */
 #define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5)      /* priority mask */
@@ -214,6 +230,33 @@ enum dw_dma_msize {
 /* Bitfields in CFG */
 #define DW_CFG_DMA_EN          (1 << 0)
 
+/* iDMA 32-bit support */
+
+/* Bitfields in CTL_HI */
+#define IDMA32C_CTLH_BLOCK_TS_MASK     GENMASK(16, 0)
+#define IDMA32C_CTLH_BLOCK_TS(x)       ((x) & IDMA32C_CTLH_BLOCK_TS_MASK)
+#define IDMA32C_CTLH_DONE              (1 << 17)
+
+/* Bitfields in CFG_LO */
+#define IDMA32C_CFGL_DST_BURST_ALIGN   (1 << 0)        /* dst burst align */
+#define IDMA32C_CFGL_SRC_BURST_ALIGN   (1 << 1)        /* src burst align */
+#define IDMA32C_CFGL_CH_DRAIN          (1 << 10)       /* drain FIFO */
+#define IDMA32C_CFGL_DST_OPT_BL                (1 << 20)       /* optimize dst burst length */
+#define IDMA32C_CFGL_SRC_OPT_BL                (1 << 21)       /* optimize src burst length */
+
+/* Bitfields in CFG_HI */
+#define IDMA32C_CFGH_SRC_PER(x)                ((x) << 0)
+#define IDMA32C_CFGH_DST_PER(x)                ((x) << 4)
+#define IDMA32C_CFGH_RD_ISSUE_THD(x)   ((x) << 8)
+#define IDMA32C_CFGH_RW_ISSUE_THD(x)   ((x) << 18)
+#define IDMA32C_CFGH_SRC_PER_EXT(x)    ((x) << 28)     /* src peripheral extension */
+#define IDMA32C_CFGH_DST_PER_EXT(x)    ((x) << 30)     /* dst peripheral extension */
+
+/* Bitfields in FIFO_PARTITION */
+#define IDMA32C_FP_PSIZE_CH0(x)                ((x) << 0)
+#define IDMA32C_FP_PSIZE_CH1(x)                ((x) << 13)
+#define IDMA32C_FP_UPDATE              (1 << 26)
+
 enum dw_dmac_flags {
        DW_DMA_IS_CYCLIC = 0,
        DW_DMA_IS_SOFT_LLP = 1,
@@ -270,6 +313,7 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 
 struct dw_dma {
        struct dma_device       dma;
+       char                    name[20];
        void __iomem            *regs;
        struct dma_pool         *desc_pool;
        struct tasklet_struct   tasklet;
@@ -293,6 +337,11 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
 #define dma_writel(dw, name, val) \
        dma_writel_native((val), &(__dw_regs(dw)->name))
 
+#define idma32_readq(dw, name)                         \
+       hi_lo_readq(&(__dw_regs(dw)->name))
+#define idma32_writeq(dw, name, val)                   \
+       hi_lo_writeq((val), &(__dw_regs(dw)->name))
+
 #define channel_set_bit(dw, reg, mask) \
        dma_writel(dw, reg, ((mask) << 8) | (mask))
 #define channel_clear_bit(dw, reg, mask) \
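
The FIFO_PARTITION bitfields defined above are consumed by
idma32_fifo_partition() in the core.c hunk earlier: 128 bytes per channel out
of the 1024-byte FIFO, UPDATE bit set, and the same value mirrored into both
32-bit halves of the register. A quick numeric check using copies of those
macros (userspace, for illustration only):

#include <stdint.h>
#include <stdio.h>

#define IDMA32C_FP_PSIZE_CH0(x)	((uint64_t)(x) << 0)
#define IDMA32C_FP_PSIZE_CH1(x)	((uint64_t)(x) << 13)
#define IDMA32C_FP_UPDATE	((uint64_t)1 << 26)

int main(void)
{
	uint64_t value = IDMA32C_FP_PSIZE_CH0(128) |
			 IDMA32C_FP_PSIZE_CH1(128) |
			 IDMA32C_FP_UPDATE;
	uint64_t fifo_partition = value | (value << 32);

	printf("value          = 0x%08llx\n",
	       (unsigned long long)value);		/* 0x04100080 */
	printf("fifo_partition = 0x%016llx\n",
	       (unsigned long long)fifo_partition);	/* 0x0410008004100080 */
	return 0;
}
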
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index dd184b5..2846278 100644 (file)
@@ -272,7 +272,7 @@ static void ipu_irq_handler(struct irq_desc *desc)
        u32 status;
        int i, line;
 
-       for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
+       for (i = 0; i < IPU_IRQ_NR_BANKS; i++) {
                struct ipu_irq_bank *bank = irq_bank + i;
 
                raw_spin_lock(&bank_lock);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 4c357d4..48b22d5 100644 (file)
@@ -1724,6 +1724,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 
        dmac->dev = &pdev->dev;
        platform_set_drvdata(pdev, dmac);
+       dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
 
        ret = rcar_dmac_parse_of(&pdev->dev, dmac);
        if (ret < 0)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 8684d11..a6620b6 100644 (file)
@@ -2809,12 +2809,14 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
 
 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
 {
-       if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
+       if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
                dev->device_prep_slave_sg = d40_prep_slave_sg;
+               dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       }
 
        if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
                dev->device_prep_dma_memcpy = d40_prep_memcpy;
-
+               dev->directions = BIT(DMA_MEM_TO_MEM);
                /*
                 * This controller can only access address at even
                 * 32bit boundaries, i.e. 2^2
@@ -2836,6 +2838,7 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
        dev->device_pause = d40_pause;
        dev->device_resume = d40_resume;
        dev->device_terminate_all = d40_terminate_all;
+       dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        dev->dev = base->dev;
 }
 
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 3056ce7..49f86ca 100644 (file)
 #define STM32_DMA_MAX_CHANNELS         0x08
 #define STM32_DMA_MAX_REQUEST_ID       0x08
 #define STM32_DMA_MAX_DATA_PARAM       0x03
+#define STM32_DMA_MAX_BURST            16
 
 enum stm32_dma_width {
        STM32_DMA_BYTE,
@@ -403,6 +404,13 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
        return 0;
 }
 
+static void stm32_dma_synchronize(struct dma_chan *c)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+
+       vchan_synchronize(&chan->vchan);
+}
+
 static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
 {
        struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
@@ -421,7 +429,7 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
        dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
 }
 
-static int stm32_dma_start_transfer(struct stm32_dma_chan *chan)
+static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 {
        struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
        struct virt_dma_desc *vdesc;
@@ -432,12 +440,12 @@ static int stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 
        ret = stm32_dma_disable_chan(chan);
        if (ret < 0)
-               return ret;
+               return;
 
        if (!chan->desc) {
                vdesc = vchan_next_desc(&chan->vchan);
                if (!vdesc)
-                       return -EPERM;
+                       return;
 
                chan->desc = to_stm32_dma_desc(vdesc);
                chan->next_sg = 0;
@@ -471,7 +479,7 @@ static int stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 
        chan->busy = true;
 
-       return 0;
+       dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
 }
 
 static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -500,8 +508,6 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
                        dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
                                stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
                }
-
-               chan->next_sg++;
        }
 }
 
@@ -510,6 +516,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
        if (chan->desc) {
                if (chan->desc->cyclic) {
                        vchan_cyclic_callback(&chan->desc->vdesc);
+                       chan->next_sg++;
                        stm32_dma_configure_next_sg(chan);
                } else {
                        chan->busy = false;
@@ -552,15 +559,13 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
 {
        struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
        unsigned long flags;
-       int ret;
 
        spin_lock_irqsave(&chan->vchan.lock, flags);
-       if (!chan->busy) {
-               if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
-                       ret = stm32_dma_start_transfer(chan);
-                       if ((!ret) && (chan->desc->cyclic))
-                               stm32_dma_configure_next_sg(chan);
-               }
+       if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
+               dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
+               stm32_dma_start_transfer(chan);
+               if (chan->desc->cyclic)
+                       stm32_dma_configure_next_sg(chan);
        }
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }
@@ -848,26 +853,40 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
 }
 
+static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
+{
+       u32 dma_scr, width, ndtr;
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+
+       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+       width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
+       ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+
+       return ndtr << width;
+}
+
 static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
                                     struct stm32_dma_desc *desc,
                                     u32 next_sg)
 {
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       u32 dma_scr, width, residue, count;
+       u32 residue = 0;
        int i;
 
-       residue = 0;
+       /*
+        * In cyclic mode, for the last period, residue = remaining bytes from
+        * NDTR
+        */
+       if (chan->desc->cyclic && next_sg == 0)
+               return stm32_dma_get_remaining_bytes(chan);
 
+       /*
+        * For all other periods in cyclic mode, and in sg mode,
+        * residue = remaining bytes from NDTR + remaining periods/sg to be
+        * transferred
+        */
        for (i = next_sg; i < desc->num_sgs; i++)
                residue += desc->sg_req[i].len;
-
-       if (next_sg != 0) {
-               dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
-               width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
-               count = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
-
-               residue += count << width;
-       }
+       residue += stm32_dma_get_remaining_bytes(chan);
 
        return residue;
 }
@@ -964,27 +983,36 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
                                           struct of_dma *ofdma)
 {
        struct stm32_dma_device *dmadev = ofdma->of_dma_data;
+       struct device *dev = dmadev->ddev.dev;
        struct stm32_dma_cfg cfg;
        struct stm32_dma_chan *chan;
        struct dma_chan *c;
 
-       if (dma_spec->args_count < 4)
+       if (dma_spec->args_count < 4) {
+               dev_err(dev, "Bad number of cells\n");
                return NULL;
+       }
 
        cfg.channel_id = dma_spec->args[0];
        cfg.request_line = dma_spec->args[1];
        cfg.stream_config = dma_spec->args[2];
        cfg.threshold = dma_spec->args[3];
 
-       if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
-                               STM32_DMA_MAX_REQUEST_ID))
+       if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) ||
+           (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)) {
+               dev_err(dev, "Bad channel and/or request id\n");
                return NULL;
+       }
 
        chan = &dmadev->chan[cfg.channel_id];
 
        c = dma_get_slave_channel(&chan->vchan.chan);
-       if (c)
-               stm32_dma_set_config(chan, &cfg);
+       if (!c) {
+               dev_err(dev, "No more channel avalaible\n");
+               return NULL;
+       }
+
+       stm32_dma_set_config(chan, &cfg);
 
        return c;
 }
@@ -1048,6 +1076,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
        dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
        dd->device_config = stm32_dma_slave_config;
        dd->device_terminate_all = stm32_dma_terminate_all;
+       dd->device_synchronize = stm32_dma_synchronize;
        dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -1056,6 +1085,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       dd->max_burst = STM32_DMA_MAX_BURST;
        dd->dev = &pdev->dev;
        INIT_LIST_HEAD(&dd->channels);
 
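The residue rework in the stm32-dma hunks above splits the computation into
stm32_dma_get_remaining_bytes() (NDTR scaled by the programmed peripheral
width) plus the lengths of the periods or sg entries that have not started
yet; only the last period of a cyclic transfer uses the NDTR value alone. A
plain-C mirror of that arithmetic with made-up numbers, just to make the
bookkeeping concrete:

/* Illustration only; mirrors stm32_dma_desc_residue() for next_sg != 0. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t remaining_bytes(uint32_t ndtr, uint32_t psize_log2)
{
	return (size_t)ndtr << psize_log2;	/* ndtr << width */
}

int main(void)
{
	const size_t sg_len[4] = { 4096, 4096, 4096, 4096 };
	unsigned int next_sg = 2;	/* two entries still fully queued */
	size_t residue = 0;
	unsigned int i;

	for (i = next_sg; i < 4; i++)
		residue += sg_len[i];
	/* NDTR = 256 items left, 32-bit peripheral width (1 << 2 bytes). */
	residue += remaining_bytes(256, 2);

	printf("residue = %zu bytes\n", residue);	/* 9216 */
	return 0;
}
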
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
deleted file mode 100644 (file)
index 380276d..0000000
+++ /dev/null
@@ -1,950 +0,0 @@
-/*
- * Copyright 2015 Linaro.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/of_dma.h>
-
-#include "virt-dma.h"
-
-#define DRIVER_NAME            "zx-dma"
-#define DMA_ALIGN              4
-#define DMA_MAX_SIZE           (0x10000 - PAGE_SIZE)
-#define LLI_BLOCK_SIZE         (4 * PAGE_SIZE)
-
-#define REG_ZX_SRC_ADDR                        0x00
-#define REG_ZX_DST_ADDR                        0x04
-#define REG_ZX_TX_X_COUNT              0x08
-#define REG_ZX_TX_ZY_COUNT             0x0c
-#define REG_ZX_SRC_ZY_STEP             0x10
-#define REG_ZX_DST_ZY_STEP             0x14
-#define REG_ZX_LLI_ADDR                        0x1c
-#define REG_ZX_CTRL                    0x20
-#define REG_ZX_TC_IRQ                  0x800
-#define REG_ZX_SRC_ERR_IRQ             0x804
-#define REG_ZX_DST_ERR_IRQ             0x808
-#define REG_ZX_CFG_ERR_IRQ             0x80c
-#define REG_ZX_TC_IRQ_RAW              0x810
-#define REG_ZX_SRC_ERR_IRQ_RAW         0x814
-#define REG_ZX_DST_ERR_IRQ_RAW         0x818
-#define REG_ZX_CFG_ERR_IRQ_RAW         0x81c
-#define REG_ZX_STATUS                  0x820
-#define REG_ZX_DMA_GRP_PRIO            0x824
-#define REG_ZX_DMA_ARB                 0x828
-
-#define ZX_FORCE_CLOSE                 BIT(31)
-#define ZX_DST_BURST_WIDTH(x)          (((x) & 0x7) << 13)
-#define ZX_MAX_BURST_LEN               16
-#define ZX_SRC_BURST_LEN(x)            (((x) & 0xf) << 9)
-#define ZX_SRC_BURST_WIDTH(x)          (((x) & 0x7) << 6)
-#define ZX_IRQ_ENABLE_ALL              (3 << 4)
-#define ZX_DST_FIFO_MODE               BIT(3)
-#define ZX_SRC_FIFO_MODE               BIT(2)
-#define ZX_SOFT_REQ                    BIT(1)
-#define ZX_CH_ENABLE                   BIT(0)
-
-#define ZX_DMA_BUSWIDTHS \
-       (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
-       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
-       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
-       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
-       BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-enum zx_dma_burst_width {
-       ZX_DMA_WIDTH_8BIT       = 0,
-       ZX_DMA_WIDTH_16BIT      = 1,
-       ZX_DMA_WIDTH_32BIT      = 2,
-       ZX_DMA_WIDTH_64BIT      = 3,
-};
-
-struct zx_desc_hw {
-       u32 saddr;
-       u32 daddr;
-       u32 src_x;
-       u32 src_zy;
-       u32 src_zy_step;
-       u32 dst_zy_step;
-       u32 reserved1;
-       u32 lli;
-       u32 ctr;
-       u32 reserved[7]; /* pack as hardware registers region size */
-} __aligned(32);
-
-struct zx_dma_desc_sw {
-       struct virt_dma_desc    vd;
-       dma_addr_t              desc_hw_lli;
-       size_t                  desc_num;
-       size_t                  size;
-       struct zx_desc_hw       *desc_hw;
-};
-
-struct zx_dma_phy;
-
-struct zx_dma_chan {
-       struct dma_slave_config slave_cfg;
-       int                     id; /* Request phy chan id */
-       u32                     ccfg;
-       u32                     cyclic;
-       struct virt_dma_chan    vc;
-       struct zx_dma_phy       *phy;
-       struct list_head        node;
-       dma_addr_t              dev_addr;
-       enum dma_status         status;
-};
-
-struct zx_dma_phy {
-       u32                     idx;
-       void __iomem            *base;
-       struct zx_dma_chan      *vchan;
-       struct zx_dma_desc_sw   *ds_run;
-       struct zx_dma_desc_sw   *ds_done;
-};
-
-struct zx_dma_dev {
-       struct dma_device       slave;
-       void __iomem            *base;
-       spinlock_t              lock; /* lock for ch and phy */
-       struct list_head        chan_pending;
-       struct zx_dma_phy       *phy;
-       struct zx_dma_chan      *chans;
-       struct clk              *clk;
-       struct dma_pool         *pool;
-       u32                     dma_channels;
-       u32                     dma_requests;
-       int                     irq;
-};
-
-#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)
-
-static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
-{
-       return container_of(chan, struct zx_dma_chan, vc.chan);
-}
-
-static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
-{
-       u32 val = 0;
-
-       val = readl_relaxed(phy->base + REG_ZX_CTRL);
-       val &= ~ZX_CH_ENABLE;
-       val |= ZX_FORCE_CLOSE;
-       writel_relaxed(val, phy->base + REG_ZX_CTRL);
-
-       val = 0x1 << phy->idx;
-       writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
-       writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
-       writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
-       writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-}
-
-static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
-{
-       writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
-       writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
-       writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
-       writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
-       writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
-       writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
-       writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
-       writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
-}
-
-static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
-{
-       return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
-}
-
-static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
-{
-       return readl_relaxed(d->base + REG_ZX_STATUS);
-}
-
-static void zx_dma_init_state(struct zx_dma_dev *d)
-{
-       /* set same priority */
-       writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
-       /* clear all irq */
-       writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
-       writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
-       writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
-       writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-}
-
-static int zx_dma_start_txd(struct zx_dma_chan *c)
-{
-       struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
-       struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
-
-       if (!c->phy)
-               return -EAGAIN;
-
-       if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
-               return -EAGAIN;
-
-       if (vd) {
-               struct zx_dma_desc_sw *ds =
-                       container_of(vd, struct zx_dma_desc_sw, vd);
-               /*
-                * fetch and remove request from vc->desc_issued
-                * so vc->desc_issued only contains desc pending
-                */
-               list_del(&ds->vd.node);
-               c->phy->ds_run = ds;
-               c->phy->ds_done = NULL;
-               /* start dma */
-               zx_dma_set_desc(c->phy, ds->desc_hw);
-               return 0;
-       }
-       c->phy->ds_done = NULL;
-       c->phy->ds_run = NULL;
-       return -EAGAIN;
-}
-
-static void zx_dma_task(struct zx_dma_dev *d)
-{
-       struct zx_dma_phy *p;
-       struct zx_dma_chan *c, *cn;
-       unsigned pch, pch_alloc = 0;
-       unsigned long flags;
-
-       /* check new dma request of running channel in vc->desc_issued */
-       list_for_each_entry_safe(c, cn, &d->slave.channels,
-                                vc.chan.device_node) {
-               spin_lock_irqsave(&c->vc.lock, flags);
-               p = c->phy;
-               if (p && p->ds_done && zx_dma_start_txd(c)) {
-                       /* No current txd associated with this channel */
-                       dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
-                       /* Mark this channel free */
-                       c->phy = NULL;
-                       p->vchan = NULL;
-               }
-               spin_unlock_irqrestore(&c->vc.lock, flags);
-       }
-
-       /* check new channel request in d->chan_pending */
-       spin_lock_irqsave(&d->lock, flags);
-       while (!list_empty(&d->chan_pending)) {
-               c = list_first_entry(&d->chan_pending,
-                                    struct zx_dma_chan, node);
-               p = &d->phy[c->id];
-               if (!p->vchan) {
-                       /* remove from d->chan_pending */
-                       list_del_init(&c->node);
-                       pch_alloc |= 1 << c->id;
-                       /* Mark this channel allocated */
-                       p->vchan = c;
-                       c->phy = p;
-               } else {
-                       dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
-               }
-       }
-       spin_unlock_irqrestore(&d->lock, flags);
-
-       for (pch = 0; pch < d->dma_channels; pch++) {
-               if (pch_alloc & (1 << pch)) {
-                       p = &d->phy[pch];
-                       c = p->vchan;
-                       if (c) {
-                               spin_lock_irqsave(&c->vc.lock, flags);
-                               zx_dma_start_txd(c);
-                               spin_unlock_irqrestore(&c->vc.lock, flags);
-                       }
-               }
-       }
-}
-
-static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
-{
-       struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
-       struct zx_dma_phy *p;
-       struct zx_dma_chan *c;
-       u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
-       u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
-       u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
-       u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
-       u32 i, irq_chan = 0, task = 0;
-
-       while (tc) {
-               i = __ffs(tc);
-               tc &= ~BIT(i);
-               p = &d->phy[i];
-               c = p->vchan;
-               if (c) {
-                       unsigned long flags;
-
-                       spin_lock_irqsave(&c->vc.lock, flags);
-                       if (c->cyclic) {
-                               vchan_cyclic_callback(&p->ds_run->vd);
-                       } else {
-                               vchan_cookie_complete(&p->ds_run->vd);
-                               p->ds_done = p->ds_run;
-                               task = 1;
-                       }
-                       spin_unlock_irqrestore(&c->vc.lock, flags);
-                       irq_chan |= BIT(i);
-               }
-       }
-
-       if (serr || derr || cfg)
-               dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
-                        serr, derr, cfg);
-
-       writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
-       writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
-       writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
-       writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-
-       if (task)
-               zx_dma_task(d);
-       return IRQ_HANDLED;
-}
-
-static void zx_dma_free_chan_resources(struct dma_chan *chan)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       struct zx_dma_dev *d = to_zx_dma(chan->device);
-       unsigned long flags;
-
-       spin_lock_irqsave(&d->lock, flags);
-       list_del_init(&c->node);
-       spin_unlock_irqrestore(&d->lock, flags);
-
-       vchan_free_chan_resources(&c->vc);
-       c->ccfg = 0;
-}
-
-static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
-                                       dma_cookie_t cookie,
-                                       struct dma_tx_state *state)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       struct zx_dma_phy *p;
-       struct virt_dma_desc *vd;
-       unsigned long flags;
-       enum dma_status ret;
-       size_t bytes = 0;
-
-       ret = dma_cookie_status(&c->vc.chan, cookie, state);
-       if (ret == DMA_COMPLETE || !state)
-               return ret;
-
-       spin_lock_irqsave(&c->vc.lock, flags);
-       p = c->phy;
-       ret = c->status;
-
-       /*
-        * If the cookie is on our issue queue, then the residue is
-        * its total size.
-        */
-       vd = vchan_find_desc(&c->vc, cookie);
-       if (vd) {
-               bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
-       } else if ((!p) || (!p->ds_run)) {
-               bytes = 0;
-       } else {
-               struct zx_dma_desc_sw *ds = p->ds_run;
-               u32 clli = 0, index = 0;
-
-               bytes = 0;
-               clli = zx_dma_get_curr_lli(p);
-               index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw);
-               for (; index < ds->desc_num; index++) {
-                       bytes += ds->desc_hw[index].src_x;
-                       /* end of lli */
-                       if (!ds->desc_hw[index].lli)
-                               break;
-               }
-       }
-       spin_unlock_irqrestore(&c->vc.lock, flags);
-       dma_set_residue(state, bytes);
-       return ret;
-}
-
-static void zx_dma_issue_pending(struct dma_chan *chan)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       struct zx_dma_dev *d = to_zx_dma(chan->device);
-       unsigned long flags;
-       int issue = 0;
-
-       spin_lock_irqsave(&c->vc.lock, flags);
-       /* add request to vc->desc_issued */
-       if (vchan_issue_pending(&c->vc)) {
-               spin_lock(&d->lock);
-               if (!c->phy && list_empty(&c->node)) {
-                       /* if new channel, add chan_pending */
-                       list_add_tail(&c->node, &d->chan_pending);
-                       issue = 1;
-                       dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
-               }
-               spin_unlock(&d->lock);
-       } else {
-               dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
-       }
-       spin_unlock_irqrestore(&c->vc.lock, flags);
-
-       if (issue)
-               zx_dma_task(d);
-}
-
-static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
-                            dma_addr_t src, size_t len, u32 num, u32 ccfg)
-{
-       if ((num + 1) < ds->desc_num)
-               ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
-                       sizeof(struct zx_desc_hw);
-       ds->desc_hw[num].saddr = src;
-       ds->desc_hw[num].daddr = dst;
-       ds->desc_hw[num].src_x = len;
-       ds->desc_hw[num].ctr = ccfg;
-}
-
-static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
-                                                    struct dma_chan *chan)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       struct zx_dma_desc_sw *ds;
-       struct zx_dma_dev *d = to_zx_dma(chan->device);
-       int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);
-
-       if (num > lli_limit) {
-               dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
-                       &c->vc, num, lli_limit);
-               return NULL;
-       }
-
-       ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
-       if (!ds)
-               return NULL;
-
-       ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
-       if (!ds->desc_hw) {
-               dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
-               kfree(ds);
-               return NULL;
-       }
-       ds->desc_num = num;
-       return ds;
-}
-
-static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
-{
-       switch (width) {
-       case DMA_SLAVE_BUSWIDTH_1_BYTE:
-       case DMA_SLAVE_BUSWIDTH_2_BYTES:
-       case DMA_SLAVE_BUSWIDTH_4_BYTES:
-       case DMA_SLAVE_BUSWIDTH_8_BYTES:
-               return ffs(width) - 1;
-       default:
-               return ZX_DMA_WIDTH_32BIT;
-       }
-}
-
-static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
-{
-       struct dma_slave_config *cfg = &c->slave_cfg;
-       enum zx_dma_burst_width src_width;
-       enum zx_dma_burst_width dst_width;
-       u32 maxburst = 0;
-
-       switch (dir) {
-       case DMA_MEM_TO_MEM:
-               c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
-                       | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
-                       | ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
-                       | ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
-               break;
-       case DMA_MEM_TO_DEV:
-               c->dev_addr = cfg->dst_addr;
-               /* dst len is calculated from src width, len and dst width.
-                * We need to make sure dst len does not exceed MAX LEN.
-                * A trailing single transaction that does not fill a full
-                * burst also requires identical src/dst data widths.
-                */
-               dst_width = zx_dma_burst_width(cfg->dst_addr_width);
-               maxburst = cfg->dst_maxburst;
-               maxburst = maxburst < ZX_MAX_BURST_LEN ?
-                               maxburst : ZX_MAX_BURST_LEN;
-               c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
-                       | ZX_SRC_BURST_LEN(maxburst - 1)
-                       | ZX_SRC_BURST_WIDTH(dst_width)
-                       | ZX_DST_BURST_WIDTH(dst_width);
-               break;
-       case DMA_DEV_TO_MEM:
-               c->dev_addr = cfg->src_addr;
-               src_width = zx_dma_burst_width(cfg->src_addr_width);
-               maxburst = cfg->src_maxburst;
-               maxburst = maxburst < ZX_MAX_BURST_LEN ?
-                               maxburst : ZX_MAX_BURST_LEN;
-               c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
-                       | ZX_SRC_BURST_LEN(maxburst - 1)
-                       | ZX_SRC_BURST_WIDTH(src_width)
-                       | ZX_DST_BURST_WIDTH(src_width);
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
-       struct dma_chan *chan,  dma_addr_t dst, dma_addr_t src,
-       size_t len, unsigned long flags)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       struct zx_dma_desc_sw *ds;
-       size_t copy = 0;
-       int num = 0;
-
-       if (!len)
-               return NULL;
-
-       if (zx_pre_config(c, DMA_MEM_TO_MEM))
-               return NULL;
-
-       num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
-
-       ds = zx_alloc_desc_resource(num, chan);
-       if (!ds)
-               return NULL;
-
-       ds->size = len;
-       num = 0;
-
-       do {
-               copy = min_t(size_t, len, DMA_MAX_SIZE);
-               zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
-
-               src += copy;
-               dst += copy;
-               len -= copy;
-       } while (len);
-
-       c->cyclic = 0;
-       ds->desc_hw[num - 1].lli = 0;   /* end of link */
-       ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
-       return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
-       struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
-       enum dma_transfer_direction dir, unsigned long flags, void *context)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       struct zx_dma_desc_sw *ds;
-       size_t len, avail, total = 0;
-       struct scatterlist *sg;
-       dma_addr_t addr, src = 0, dst = 0;
-       int num = sglen, i;
-
-       if (!sgl)
-               return NULL;
-
-       if (zx_pre_config(c, dir))
-               return NULL;
-
-       for_each_sg(sgl, sg, sglen, i) {
-               avail = sg_dma_len(sg);
-               if (avail > DMA_MAX_SIZE)
-                       num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
-       }
-
-       ds = zx_alloc_desc_resource(num, chan);
-       if (!ds)
-               return NULL;
-
-       c->cyclic = 0;
-       num = 0;
-       for_each_sg(sgl, sg, sglen, i) {
-               addr = sg_dma_address(sg);
-               avail = sg_dma_len(sg);
-               total += avail;
-
-               do {
-                       len = min_t(size_t, avail, DMA_MAX_SIZE);
-
-                       if (dir == DMA_MEM_TO_DEV) {
-                               src = addr;
-                               dst = c->dev_addr;
-                       } else if (dir == DMA_DEV_TO_MEM) {
-                               src = c->dev_addr;
-                               dst = addr;
-                       }
-
-                       zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
-
-                       addr += len;
-                       avail -= len;
-               } while (avail);
-       }
-
-       ds->desc_hw[num - 1].lli = 0;   /* end of link */
-       ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
-       ds->size = total;
-       return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
-               struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-               size_t period_len, enum dma_transfer_direction dir,
-               unsigned long flags)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       struct zx_dma_desc_sw *ds;
-       dma_addr_t src = 0, dst = 0;
-       int num_periods = buf_len / period_len;
-       int buf = 0, num = 0;
-
-       if (period_len > DMA_MAX_SIZE) {
-               dev_err(chan->device->dev, "maximum period size exceeded\n");
-               return NULL;
-       }
-
-       if (zx_pre_config(c, dir))
-               return NULL;
-
-       ds = zx_alloc_desc_resource(num_periods, chan);
-       if (!ds)
-               return NULL;
-       c->cyclic = 1;
-
-       while (buf < buf_len) {
-               if (dir == DMA_MEM_TO_DEV) {
-                       src = dma_addr;
-                       dst = c->dev_addr;
-               } else if (dir == DMA_DEV_TO_MEM) {
-                       src = c->dev_addr;
-                       dst = dma_addr;
-               }
-               zx_dma_fill_desc(ds, dst, src, period_len, num++,
-                                c->ccfg | ZX_IRQ_ENABLE_ALL);
-               dma_addr += period_len;
-               buf += period_len;
-       }
-
-       ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
-       ds->size = buf_len;
-       return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static int zx_dma_config(struct dma_chan *chan,
-                        struct dma_slave_config *cfg)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-
-       if (!cfg)
-               return -EINVAL;
-
-       memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
-
-       return 0;
-}
-
-static int zx_dma_terminate_all(struct dma_chan *chan)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       struct zx_dma_dev *d = to_zx_dma(chan->device);
-       struct zx_dma_phy *p = c->phy;
-       unsigned long flags;
-       LIST_HEAD(head);
-
-       dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
-
-       /* Prevent this channel being scheduled */
-       spin_lock(&d->lock);
-       list_del_init(&c->node);
-       spin_unlock(&d->lock);
-
-       /* Clear the tx descriptor lists */
-       spin_lock_irqsave(&c->vc.lock, flags);
-       vchan_get_all_descriptors(&c->vc, &head);
-       if (p) {
-               /* vchan is assigned to a pchan - stop the channel */
-               zx_dma_terminate_chan(p, d);
-               c->phy = NULL;
-               p->vchan = NULL;
-               p->ds_run = NULL;
-               p->ds_done = NULL;
-       }
-       spin_unlock_irqrestore(&c->vc.lock, flags);
-       vchan_dma_desc_free_list(&c->vc, &head);
-
-       return 0;
-}
-
-static int zx_dma_transfer_pause(struct dma_chan *chan)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       u32 val = 0;
-
-       val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
-       val &= ~ZX_CH_ENABLE;
-       writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
-
-       return 0;
-}
-
-static int zx_dma_transfer_resume(struct dma_chan *chan)
-{
-       struct zx_dma_chan *c = to_zx_chan(chan);
-       u32 val = 0;
-
-       val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
-       val |= ZX_CH_ENABLE;
-       writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
-
-       return 0;
-}
-
-static void zx_dma_free_desc(struct virt_dma_desc *vd)
-{
-       struct zx_dma_desc_sw *ds =
-               container_of(vd, struct zx_dma_desc_sw, vd);
-       struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
-
-       dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
-       kfree(ds);
-}
-
-static const struct of_device_id zx6702_dma_dt_ids[] = {
-       { .compatible = "zte,zx296702-dma", },
-       {}
-};
-MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);
-
-static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
-                                              struct of_dma *ofdma)
-{
-       struct zx_dma_dev *d = ofdma->of_dma_data;
-       unsigned int request = dma_spec->args[0];
-       struct dma_chan *chan;
-       struct zx_dma_chan *c;
-
-       if (request >= d->dma_requests)
-               return NULL;
-
-       chan = dma_get_any_slave_channel(&d->slave);
-       if (!chan) {
-               dev_err(d->slave.dev, "failed to get channel in %s\n", __func__);
-               return NULL;
-       }
-       c = to_zx_chan(chan);
-       c->id = request;
-       dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
-                c->id, &c->vc);
-       return chan;
-}
-
-static int zx_dma_probe(struct platform_device *op)
-{
-       struct zx_dma_dev *d;
-       struct resource *iores;
-       int i, ret = 0;
-
-       iores = platform_get_resource(op, IORESOURCE_MEM, 0);
-       if (!iores)
-               return -EINVAL;
-
-       d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
-       if (!d)
-               return -ENOMEM;
-
-       d->base = devm_ioremap_resource(&op->dev, iores);
-       if (IS_ERR(d->base))
-               return PTR_ERR(d->base);
-
-       of_property_read_u32((&op->dev)->of_node,
-                            "dma-channels", &d->dma_channels);
-       of_property_read_u32((&op->dev)->of_node,
-                            "dma-requests", &d->dma_requests);
-       if (!d->dma_requests || !d->dma_channels)
-               return -EINVAL;
-
-       d->clk = devm_clk_get(&op->dev, NULL);
-       if (IS_ERR(d->clk)) {
-               dev_err(&op->dev, "no dma clk\n");
-               return PTR_ERR(d->clk);
-       }
-
-       d->irq = platform_get_irq(op, 0);
-       ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
-                              0, DRIVER_NAME, d);
-       if (ret)
-               return ret;
-
-       /* A DMA memory pool for LLIs, align on 32-byte boundary */
-       d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
-                       LLI_BLOCK_SIZE, 32, 0);
-       if (!d->pool)
-               return -ENOMEM;
-
-       /* init phy channel */
-       d->phy = devm_kzalloc(&op->dev,
-               d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
-       if (!d->phy)
-               return -ENOMEM;
-
-       for (i = 0; i < d->dma_channels; i++) {
-               struct zx_dma_phy *p = &d->phy[i];
-
-               p->idx = i;
-               p->base = d->base + i * 0x40;
-       }
-
-       INIT_LIST_HEAD(&d->slave.channels);
-       dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
-       dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
-       dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
-       d->slave.dev = &op->dev;
-       d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
-       d->slave.device_tx_status = zx_dma_tx_status;
-       d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
-       d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
-       d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
-       d->slave.device_issue_pending = zx_dma_issue_pending;
-       d->slave.device_config = zx_dma_config;
-       d->slave.device_terminate_all = zx_dma_terminate_all;
-       d->slave.device_pause = zx_dma_transfer_pause;
-       d->slave.device_resume = zx_dma_transfer_resume;
-       d->slave.copy_align = DMA_ALIGN;
-       d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
-       d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
-       d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
-                       | BIT(DMA_DEV_TO_MEM);
-       d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
-
-       /* init virtual channel */
-       d->chans = devm_kzalloc(&op->dev,
-               d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
-       if (!d->chans)
-               return -ENOMEM;
-
-       for (i = 0; i < d->dma_requests; i++) {
-               struct zx_dma_chan *c = &d->chans[i];
-
-               c->status = DMA_IN_PROGRESS;
-               INIT_LIST_HEAD(&c->node);
-               c->vc.desc_free = zx_dma_free_desc;
-               vchan_init(&c->vc, &d->slave);
-       }
-
-       /* Enable clock before accessing registers */
-       ret = clk_prepare_enable(d->clk);
-       if (ret < 0) {
-               dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
-               goto zx_dma_out;
-       }
-
-       zx_dma_init_state(d);
-
-       spin_lock_init(&d->lock);
-       INIT_LIST_HEAD(&d->chan_pending);
-       platform_set_drvdata(op, d);
-
-       ret = dma_async_device_register(&d->slave);
-       if (ret)
-               goto clk_dis;
-
-       ret = of_dma_controller_register((&op->dev)->of_node,
-                                        zx_of_dma_simple_xlate, d);
-       if (ret)
-               goto of_dma_register_fail;
-
-       dev_info(&op->dev, "initialized\n");
-       return 0;
-
-of_dma_register_fail:
-       dma_async_device_unregister(&d->slave);
-clk_dis:
-       clk_disable_unprepare(d->clk);
-zx_dma_out:
-       return ret;
-}
-
-static int zx_dma_remove(struct platform_device *op)
-{
-       struct zx_dma_chan *c, *cn;
-       struct zx_dma_dev *d = platform_get_drvdata(op);
-
-       /* explicitly free the irq */
-       devm_free_irq(&op->dev, d->irq, d);
-
-       dma_async_device_unregister(&d->slave);
-       of_dma_controller_free((&op->dev)->of_node);
-
-       list_for_each_entry_safe(c, cn, &d->slave.channels,
-                                vc.chan.device_node) {
-               list_del(&c->vc.chan.device_node);
-       }
-       clk_disable_unprepare(d->clk);
-       dmam_pool_destroy(d->pool);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int zx_dma_suspend_dev(struct device *dev)
-{
-       struct zx_dma_dev *d = dev_get_drvdata(dev);
-       u32 stat = 0;
-
-       stat = zx_dma_get_chan_stat(d);
-       if (stat) {
-               dev_warn(d->slave.dev,
-                        "chan %d is running, cannot suspend\n", stat);
-               return -1;
-       }
-       clk_disable_unprepare(d->clk);
-       return 0;
-}
-
-static int zx_dma_resume_dev(struct device *dev)
-{
-       struct zx_dma_dev *d = dev_get_drvdata(dev);
-       int ret = 0;
-
-       ret = clk_prepare_enable(d->clk);
-       if (ret < 0) {
-               dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
-               return ret;
-       }
-       zx_dma_init_state(d);
-       return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);
-
-static struct platform_driver zx_pdma_driver = {
-       .driver         = {
-               .name   = DRIVER_NAME,
-               .pm     = &zx_dma_pmops,
-               .of_match_table = zx6702_dma_dt_ids,
-       },
-       .probe          = zx_dma_probe,
-       .remove         = zx_dma_remove,
-};
-
-module_platform_driver(zx_pdma_driver);
-
-MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
-MODULE_AUTHOR("Jun Nie jun.nie@linaro.org");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c
new file mode 100644 (file)
index 0000000..2bb6953
--- /dev/null
@@ -0,0 +1,952 @@
+/*
+ * Copyright 2015 Linaro.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME            "zx-dma"
+#define DMA_ALIGN              4
+#define DMA_MAX_SIZE           (0x10000 - 512)
+#define LLI_BLOCK_SIZE         (4 * PAGE_SIZE)
+
+#define REG_ZX_SRC_ADDR                        0x00
+#define REG_ZX_DST_ADDR                        0x04
+#define REG_ZX_TX_X_COUNT              0x08
+#define REG_ZX_TX_ZY_COUNT             0x0c
+#define REG_ZX_SRC_ZY_STEP             0x10
+#define REG_ZX_DST_ZY_STEP             0x14
+#define REG_ZX_LLI_ADDR                        0x1c
+#define REG_ZX_CTRL                    0x20
+#define REG_ZX_TC_IRQ                  0x800
+#define REG_ZX_SRC_ERR_IRQ             0x804
+#define REG_ZX_DST_ERR_IRQ             0x808
+#define REG_ZX_CFG_ERR_IRQ             0x80c
+#define REG_ZX_TC_IRQ_RAW              0x810
+#define REG_ZX_SRC_ERR_IRQ_RAW         0x814
+#define REG_ZX_DST_ERR_IRQ_RAW         0x818
+#define REG_ZX_CFG_ERR_IRQ_RAW         0x81c
+#define REG_ZX_STATUS                  0x820
+#define REG_ZX_DMA_GRP_PRIO            0x824
+#define REG_ZX_DMA_ARB                 0x828
+
+#define ZX_FORCE_CLOSE                 BIT(31)
+#define ZX_DST_BURST_WIDTH(x)          (((x) & 0x7) << 13)
+#define ZX_MAX_BURST_LEN               16
+#define ZX_SRC_BURST_LEN(x)            (((x) & 0xf) << 9)
+#define ZX_SRC_BURST_WIDTH(x)          (((x) & 0x7) << 6)
+#define ZX_IRQ_ENABLE_ALL              (3 << 4)
+#define ZX_DST_FIFO_MODE               BIT(3)
+#define ZX_SRC_FIFO_MODE               BIT(2)
+#define ZX_SOFT_REQ                    BIT(1)
+#define ZX_CH_ENABLE                   BIT(0)
+
+#define ZX_DMA_BUSWIDTHS \
+       (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+enum zx_dma_burst_width {
+       ZX_DMA_WIDTH_8BIT       = 0,
+       ZX_DMA_WIDTH_16BIT      = 1,
+       ZX_DMA_WIDTH_32BIT      = 2,
+       ZX_DMA_WIDTH_64BIT      = 3,
+};
+
+struct zx_desc_hw {
+       u32 saddr;
+       u32 daddr;
+       u32 src_x;
+       u32 src_zy;
+       u32 src_zy_step;
+       u32 dst_zy_step;
+       u32 reserved1;
+       u32 lli;
+       u32 ctr;
+       u32 reserved[7]; /* pad descriptor to the hardware register region size */
+} __aligned(32);
+
+struct zx_dma_desc_sw {
+       struct virt_dma_desc    vd;
+       dma_addr_t              desc_hw_lli;
+       size_t                  desc_num;
+       size_t                  size;
+       struct zx_desc_hw       *desc_hw;
+};
+
+struct zx_dma_phy;
+
+struct zx_dma_chan {
+       struct dma_slave_config slave_cfg;
+       int                     id; /* Request phy chan id */
+       u32                     ccfg;
+       u32                     cyclic;
+       struct virt_dma_chan    vc;
+       struct zx_dma_phy       *phy;
+       struct list_head        node;
+       dma_addr_t              dev_addr;
+       enum dma_status         status;
+};
+
+struct zx_dma_phy {
+       u32                     idx;
+       void __iomem            *base;
+       struct zx_dma_chan      *vchan;
+       struct zx_dma_desc_sw   *ds_run;
+       struct zx_dma_desc_sw   *ds_done;
+};
+
+struct zx_dma_dev {
+       struct dma_device       slave;
+       void __iomem            *base;
+       spinlock_t              lock; /* lock for ch and phy */
+       struct list_head        chan_pending;
+       struct zx_dma_phy       *phy;
+       struct zx_dma_chan      *chans;
+       struct clk              *clk;
+       struct dma_pool         *pool;
+       u32                     dma_channels;
+       u32                     dma_requests;
+       int                     irq;
+};
+
+#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)
+
+static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct zx_dma_chan, vc.chan);
+}
+
+static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
+{
+       u32 val = 0;
+
+       val = readl_relaxed(phy->base + REG_ZX_CTRL);
+       val &= ~ZX_CH_ENABLE;
+       val |= ZX_FORCE_CLOSE;
+       writel_relaxed(val, phy->base + REG_ZX_CTRL);
+
+       val = 0x1 << phy->idx;
+       writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
+       writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+       writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+       writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
+{
+       writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
+       writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
+       writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
+       writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
+       writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
+       writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
+       writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
+       writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
+}
+
+static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
+{
+       return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
+}
+
+static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
+{
+       return readl_relaxed(d->base + REG_ZX_STATUS);
+}
+
+static void zx_dma_init_state(struct zx_dma_dev *d)
+{
+       /* set same priority */
+       writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
+       /* clear all irq */
+       writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
+       writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+       writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+       writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static int zx_dma_start_txd(struct zx_dma_chan *c)
+{
+       struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
+       struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+       if (!c->phy)
+               return -EAGAIN;
+
+       if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
+               return -EAGAIN;
+
+       if (vd) {
+               struct zx_dma_desc_sw *ds =
+                       container_of(vd, struct zx_dma_desc_sw, vd);
+               /*
+                * Fetch and remove the request from vc->desc_issued so that
+                * vc->desc_issued only contains descriptors still pending.
+                */
+               list_del(&ds->vd.node);
+               c->phy->ds_run = ds;
+               c->phy->ds_done = NULL;
+               /* start dma */
+               zx_dma_set_desc(c->phy, ds->desc_hw);
+               return 0;
+       }
+       c->phy->ds_done = NULL;
+       c->phy->ds_run = NULL;
+       return -EAGAIN;
+}
+
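+/*
+ * Channel scheduler: first release any physical channel whose descriptor has
+ * completed and which has nothing further queued, then bind pending virtual
+ * channels to their requested physical channel and start the next descriptor
+ * on every channel that was just bound.
+ */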
+static void zx_dma_task(struct zx_dma_dev *d)
+{
+       struct zx_dma_phy *p;
+       struct zx_dma_chan *c, *cn;
+       unsigned pch, pch_alloc = 0;
+       unsigned long flags;
+
+       /* check new dma request of running channel in vc->desc_issued */
+       list_for_each_entry_safe(c, cn, &d->slave.channels,
+                                vc.chan.device_node) {
+               spin_lock_irqsave(&c->vc.lock, flags);
+               p = c->phy;
+               if (p && p->ds_done && zx_dma_start_txd(c)) {
+                       /* No current txd associated with this channel */
+                       dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+                       /* Mark this channel free */
+                       c->phy = NULL;
+                       p->vchan = NULL;
+               }
+               spin_unlock_irqrestore(&c->vc.lock, flags);
+       }
+
+       /* check new channel request in d->chan_pending */
+       spin_lock_irqsave(&d->lock, flags);
+       while (!list_empty(&d->chan_pending)) {
+               c = list_first_entry(&d->chan_pending,
+                                    struct zx_dma_chan, node);
+               p = &d->phy[c->id];
+               if (!p->vchan) {
+                       /* remove from d->chan_pending */
+                       list_del_init(&c->node);
+                       pch_alloc |= 1 << c->id;
+                       /* Mark this channel allocated */
+                       p->vchan = c;
+                       c->phy = p;
+               } else {
+                       dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
+               }
+       }
+       spin_unlock_irqrestore(&d->lock, flags);
+
+       for (pch = 0; pch < d->dma_channels; pch++) {
+               if (pch_alloc & (1 << pch)) {
+                       p = &d->phy[pch];
+                       c = p->vchan;
+                       if (c) {
+                               spin_lock_irqsave(&c->vc.lock, flags);
+                               zx_dma_start_txd(c);
+                               spin_unlock_irqrestore(&c->vc.lock, flags);
+                       }
+               }
+       }
+}
+
+static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
+{
+       struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
+       struct zx_dma_phy *p;
+       struct zx_dma_chan *c;
+       u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
+       u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
+       u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
+       u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
+       u32 i, irq_chan = 0, task = 0;
+
+       while (tc) {
+               i = __ffs(tc);
+               tc &= ~BIT(i);
+               p = &d->phy[i];
+               c = p->vchan;
+               if (c) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&c->vc.lock, flags);
+                       if (c->cyclic) {
+                               vchan_cyclic_callback(&p->ds_run->vd);
+                       } else {
+                               vchan_cookie_complete(&p->ds_run->vd);
+                               p->ds_done = p->ds_run;
+                               task = 1;
+                       }
+                       spin_unlock_irqrestore(&c->vc.lock, flags);
+                       irq_chan |= BIT(i);
+               }
+       }
+
+       if (serr || derr || cfg)
+               dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
+                        serr, derr, cfg);
+
+       writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
+       writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+       writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+       writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+
+       if (task)
+               zx_dma_task(d);
+       return IRQ_HANDLED;
+}
+
+static void zx_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       struct zx_dma_dev *d = to_zx_dma(chan->device);
+       unsigned long flags;
+
+       spin_lock_irqsave(&d->lock, flags);
+       list_del_init(&c->node);
+       spin_unlock_irqrestore(&d->lock, flags);
+
+       vchan_free_chan_resources(&c->vc);
+       c->ccfg = 0;
+}
+
+static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
+                                       dma_cookie_t cookie,
+                                       struct dma_tx_state *state)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       struct zx_dma_phy *p;
+       struct virt_dma_desc *vd;
+       unsigned long flags;
+       enum dma_status ret;
+       size_t bytes = 0;
+
+       ret = dma_cookie_status(&c->vc.chan, cookie, state);
+       if (ret == DMA_COMPLETE || !state)
+               return ret;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       p = c->phy;
+       ret = c->status;
+
+       /*
+        * If the cookie is on our issue queue, then the residue is
+        * its total size.
+        */
+       vd = vchan_find_desc(&c->vc, cookie);
+       if (vd) {
+               bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
+       } else if ((!p) || (!p->ds_run)) {
+               bytes = 0;
+       } else {
+               struct zx_dma_desc_sw *ds = p->ds_run;
+               u32 clli = 0, index = 0;
+
+               bytes = 0;
+               clli = zx_dma_get_curr_lli(p);
+               index = (clli - ds->desc_hw_lli) /
+                               sizeof(struct zx_desc_hw) + 1;
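+               /* Sum the sizes of the LLIs that have not completed yet. */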
+               for (; index < ds->desc_num; index++) {
+                       bytes += ds->desc_hw[index].src_x;
+                       /* end of lli */
+                       if (!ds->desc_hw[index].lli)
+                               break;
+               }
+       }
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+       dma_set_residue(state, bytes);
+       return ret;
+}
+
+static void zx_dma_issue_pending(struct dma_chan *chan)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       struct zx_dma_dev *d = to_zx_dma(chan->device);
+       unsigned long flags;
+       int issue = 0;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       /* add request to vc->desc_issued */
+       if (vchan_issue_pending(&c->vc)) {
+               spin_lock(&d->lock);
+               if (!c->phy && list_empty(&c->node)) {
+                       /* if new channel, add chan_pending */
+                       list_add_tail(&c->node, &d->chan_pending);
+                       issue = 1;
+                       dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+               }
+               spin_unlock(&d->lock);
+       } else {
+               dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+       }
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+
+       if (issue)
+               zx_dma_task(d);
+}
+
+static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
+                            dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
+       if ((num + 1) < ds->desc_num)
+               ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+                       sizeof(struct zx_desc_hw);
+       ds->desc_hw[num].saddr = src;
+       ds->desc_hw[num].daddr = dst;
+       ds->desc_hw[num].src_x = len;
+       ds->desc_hw[num].ctr = ccfg;
+}
+
+static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
+                                                    struct dma_chan *chan)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       struct zx_dma_desc_sw *ds;
+       struct zx_dma_dev *d = to_zx_dma(chan->device);
+       int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);
+
+       if (num > lli_limit) {
+               dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n",
+                       &c->vc, num, lli_limit);
+               return NULL;
+       }
+
+       ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
+       if (!ds)
+               return NULL;
+
+       ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+       if (!ds->desc_hw) {
+               dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
+               kfree(ds);
+               return NULL;
+       }
+       ds->desc_num = num;
+       return ds;
+}
+
+static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
+{
+       switch (width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+       case DMA_SLAVE_BUSWIDTH_8_BYTES:
+               return ffs(width) - 1;
+       default:
+               return ZX_DMA_WIDTH_32BIT;
+       }
+}
+
+static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
+{
+       struct dma_slave_config *cfg = &c->slave_cfg;
+       enum zx_dma_burst_width src_width;
+       enum zx_dma_burst_width dst_width;
+       u32 maxburst = 0;
+
+       switch (dir) {
+       case DMA_MEM_TO_MEM:
+               c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
+                       | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
+                       | ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
+                       | ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
+               break;
+       case DMA_MEM_TO_DEV:
+               c->dev_addr = cfg->dst_addr;
+               /* dst len is calculated from src width, len and dst width.
+                * We need to make sure dst len does not exceed MAX LEN.
+                * A trailing single transaction that does not fill a full
+                * burst also requires identical src/dst data widths.
+                */
+               dst_width = zx_dma_burst_width(cfg->dst_addr_width);
+               maxburst = cfg->dst_maxburst;
+               maxburst = maxburst < ZX_MAX_BURST_LEN ?
+                               maxburst : ZX_MAX_BURST_LEN;
+               c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
+                       | ZX_SRC_BURST_LEN(maxburst - 1)
+                       | ZX_SRC_BURST_WIDTH(dst_width)
+                       | ZX_DST_BURST_WIDTH(dst_width);
+               break;
+       case DMA_DEV_TO_MEM:
+               c->dev_addr = cfg->src_addr;
+               src_width = zx_dma_burst_width(cfg->src_addr_width);
+               maxburst = cfg->src_maxburst;
+               maxburst = maxburst < ZX_MAX_BURST_LEN ?
+                               maxburst : ZX_MAX_BURST_LEN;
+               c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
+                       | ZX_SRC_BURST_LEN(maxburst - 1)
+                       | ZX_SRC_BURST_WIDTH(src_width)
+                       | ZX_DST_BURST_WIDTH(src_width);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
+       struct dma_chan *chan,  dma_addr_t dst, dma_addr_t src,
+       size_t len, unsigned long flags)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       struct zx_dma_desc_sw *ds;
+       size_t copy = 0;
+       int num = 0;
+
+       if (!len)
+               return NULL;
+
+       if (zx_pre_config(c, DMA_MEM_TO_MEM))
+               return NULL;
+
+       num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+
+       ds = zx_alloc_desc_resource(num, chan);
+       if (!ds)
+               return NULL;
+
+       ds->size = len;
+       num = 0;
+
+       do {
+               copy = min_t(size_t, len, DMA_MAX_SIZE);
+               zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+               src += copy;
+               dst += copy;
+               len -= copy;
+       } while (len);
+
+       c->cyclic = 0;
+       ds->desc_hw[num - 1].lli = 0;   /* end of link */
+       ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+       return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
+       struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+       enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       struct zx_dma_desc_sw *ds;
+       size_t len, avail, total = 0;
+       struct scatterlist *sg;
+       dma_addr_t addr, src = 0, dst = 0;
+       int num = sglen, i;
+
+       if (!sgl)
+               return NULL;
+
+       if (zx_pre_config(c, dir))
+               return NULL;
+
+       for_each_sg(sgl, sg, sglen, i) {
+               avail = sg_dma_len(sg);
+               if (avail > DMA_MAX_SIZE)
+                       num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+       }
+
+       ds = zx_alloc_desc_resource(num, chan);
+       if (!ds)
+               return NULL;
+
+       c->cyclic = 0;
+       num = 0;
+       for_each_sg(sgl, sg, sglen, i) {
+               addr = sg_dma_address(sg);
+               avail = sg_dma_len(sg);
+               total += avail;
+
+               do {
+                       len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+                       if (dir == DMA_MEM_TO_DEV) {
+                               src = addr;
+                               dst = c->dev_addr;
+                       } else if (dir == DMA_DEV_TO_MEM) {
+                               src = c->dev_addr;
+                               dst = addr;
+                       }
+
+                       zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+                       addr += len;
+                       avail -= len;
+               } while (avail);
+       }
+
+       ds->desc_hw[num - 1].lli = 0;   /* end of link */
+       ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+       ds->size = total;
+       return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
+               struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+               size_t period_len, enum dma_transfer_direction dir,
+               unsigned long flags)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       struct zx_dma_desc_sw *ds;
+       dma_addr_t src = 0, dst = 0;
+       int num_periods = buf_len / period_len;
+       int buf = 0, num = 0;
+
+       if (period_len > DMA_MAX_SIZE) {
+               dev_err(chan->device->dev, "maximum period size exceeded\n");
+               return NULL;
+       }
+
+       if (zx_pre_config(c, dir))
+               return NULL;
+
+       ds = zx_alloc_desc_resource(num_periods, chan);
+       if (!ds)
+               return NULL;
+       c->cyclic = 1;
+
+       while (buf < buf_len) {
+               if (dir == DMA_MEM_TO_DEV) {
+                       src = dma_addr;
+                       dst = c->dev_addr;
+               } else if (dir == DMA_DEV_TO_MEM) {
+                       src = c->dev_addr;
+                       dst = dma_addr;
+               }
+               zx_dma_fill_desc(ds, dst, src, period_len, num++,
+                                c->ccfg | ZX_IRQ_ENABLE_ALL);
+               dma_addr += period_len;
+               buf += period_len;
+       }
+
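+       /* Link the last descriptor back to the first to form the cyclic ring. */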
+       ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
+       ds->size = buf_len;
+       return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static int zx_dma_config(struct dma_chan *chan,
+                        struct dma_slave_config *cfg)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+
+       if (!cfg)
+               return -EINVAL;
+
+       memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
+
+       return 0;
+}
+
+static int zx_dma_terminate_all(struct dma_chan *chan)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       struct zx_dma_dev *d = to_zx_dma(chan->device);
+       struct zx_dma_phy *p = c->phy;
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+       /* Prevent this channel being scheduled */
+       spin_lock(&d->lock);
+       list_del_init(&c->node);
+       spin_unlock(&d->lock);
+
+       /* Clear the tx descriptor lists */
+       spin_lock_irqsave(&c->vc.lock, flags);
+       vchan_get_all_descriptors(&c->vc, &head);
+       if (p) {
+               /* vchan is assigned to a pchan - stop the channel */
+               zx_dma_terminate_chan(p, d);
+               c->phy = NULL;
+               p->vchan = NULL;
+               p->ds_run = NULL;
+               p->ds_done = NULL;
+       }
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+       vchan_dma_desc_free_list(&c->vc, &head);
+
+       return 0;
+}
+
+static int zx_dma_transfer_pause(struct dma_chan *chan)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       u32 val = 0;
+
+       val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+       val &= ~ZX_CH_ENABLE;
+       writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+       return 0;
+}
+
+static int zx_dma_transfer_resume(struct dma_chan *chan)
+{
+       struct zx_dma_chan *c = to_zx_chan(chan);
+       u32 val = 0;
+
+       val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+       val |= ZX_CH_ENABLE;
+       writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+       return 0;
+}
+
+static void zx_dma_free_desc(struct virt_dma_desc *vd)
+{
+       struct zx_dma_desc_sw *ds =
+               container_of(vd, struct zx_dma_desc_sw, vd);
+       struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
+
+       dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+       kfree(ds);
+}
+
+static const struct of_device_id zx6702_dma_dt_ids[] = {
+       { .compatible = "zte,zx296702-dma", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);
+
+static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+                                              struct of_dma *ofdma)
+{
+       struct zx_dma_dev *d = ofdma->of_dma_data;
+       unsigned int request = dma_spec->args[0];
+       struct dma_chan *chan;
+       struct zx_dma_chan *c;
+
+       if (request >= d->dma_requests)
+               return NULL;
+
+       chan = dma_get_any_slave_channel(&d->slave);
+       if (!chan) {
+               dev_err(d->slave.dev, "failed to get channel in %s\n", __func__);
+               return NULL;
+       }
+       c = to_zx_chan(chan);
+       c->id = request;
+       dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
+                c->id, &c->vc);
+       return chan;
+}
+
+static int zx_dma_probe(struct platform_device *op)
+{
+       struct zx_dma_dev *d;
+       struct resource *iores;
+       int i, ret = 0;
+
+       iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+       if (!iores)
+               return -EINVAL;
+
+       d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+       if (!d)
+               return -ENOMEM;
+
+       d->base = devm_ioremap_resource(&op->dev, iores);
+       if (IS_ERR(d->base))
+               return PTR_ERR(d->base);
+
+       of_property_read_u32((&op->dev)->of_node,
+                            "dma-channels", &d->dma_channels);
+       of_property_read_u32((&op->dev)->of_node,
+                            "dma-requests", &d->dma_requests);
+       if (!d->dma_requests || !d->dma_channels)
+               return -EINVAL;
+
+       d->clk = devm_clk_get(&op->dev, NULL);
+       if (IS_ERR(d->clk)) {
+               dev_err(&op->dev, "no dma clk\n");
+               return PTR_ERR(d->clk);
+       }
+
+       d->irq = platform_get_irq(op, 0);
+       ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
+                              0, DRIVER_NAME, d);
+       if (ret)
+               return ret;
+
+       /* A DMA memory pool for LLIs, align on 32-byte boundary */
+       d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
+                       LLI_BLOCK_SIZE, 32, 0);
+       if (!d->pool)
+               return -ENOMEM;
+
+       /* init phy channel */
+       d->phy = devm_kzalloc(&op->dev,
+               d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
+       if (!d->phy)
+               return -ENOMEM;
+
+       for (i = 0; i < d->dma_channels; i++) {
+               struct zx_dma_phy *p = &d->phy[i];
+
+               p->idx = i;
+               p->base = d->base + i * 0x40;
+       }
+
+       INIT_LIST_HEAD(&d->slave.channels);
+       dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+       dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+       dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
+       dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
+       d->slave.dev = &op->dev;
+       d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
+       d->slave.device_tx_status = zx_dma_tx_status;
+       d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
+       d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
+       d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
+       d->slave.device_issue_pending = zx_dma_issue_pending;
+       d->slave.device_config = zx_dma_config;
+       d->slave.device_terminate_all = zx_dma_terminate_all;
+       d->slave.device_pause = zx_dma_transfer_pause;
+       d->slave.device_resume = zx_dma_transfer_resume;
+       d->slave.copy_align = DMA_ALIGN;
+       d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
+       d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
+       d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
+                       | BIT(DMA_DEV_TO_MEM);
+       d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+       /* init virtual channel */
+       d->chans = devm_kzalloc(&op->dev,
+               d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
+       if (!d->chans)
+               return -ENOMEM;
+
+       for (i = 0; i < d->dma_requests; i++) {
+               struct zx_dma_chan *c = &d->chans[i];
+
+               c->status = DMA_IN_PROGRESS;
+               INIT_LIST_HEAD(&c->node);
+               c->vc.desc_free = zx_dma_free_desc;
+               vchan_init(&c->vc, &d->slave);
+       }
+
+       /* Enable clock before accessing registers */
+       ret = clk_prepare_enable(d->clk);
+       if (ret < 0) {
+               dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+               goto zx_dma_out;
+       }
+
+       zx_dma_init_state(d);
+
+       spin_lock_init(&d->lock);
+       INIT_LIST_HEAD(&d->chan_pending);
+       platform_set_drvdata(op, d);
+
+       ret = dma_async_device_register(&d->slave);
+       if (ret)
+               goto clk_dis;
+
+       ret = of_dma_controller_register((&op->dev)->of_node,
+                                        zx_of_dma_simple_xlate, d);
+       if (ret)
+               goto of_dma_register_fail;
+
+       dev_info(&op->dev, "initialized\n");
+       return 0;
+
+of_dma_register_fail:
+       dma_async_device_unregister(&d->slave);
+clk_dis:
+       clk_disable_unprepare(d->clk);
+zx_dma_out:
+       return ret;
+}
+
+static int zx_dma_remove(struct platform_device *op)
+{
+       struct zx_dma_chan *c, *cn;
+       struct zx_dma_dev *d = platform_get_drvdata(op);
+
+       /* explicitly free the irq */
+       devm_free_irq(&op->dev, d->irq, d);
+
+       dma_async_device_unregister(&d->slave);
+       of_dma_controller_free((&op->dev)->of_node);
+
+       list_for_each_entry_safe(c, cn, &d->slave.channels,
+                                vc.chan.device_node) {
+               list_del(&c->vc.chan.device_node);
+       }
+       clk_disable_unprepare(d->clk);
+       dmam_pool_destroy(d->pool);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int zx_dma_suspend_dev(struct device *dev)
+{
+       struct zx_dma_dev *d = dev_get_drvdata(dev);
+       u32 stat = 0;
+
+       stat = zx_dma_get_chan_stat(d);
+       if (stat) {
+               dev_warn(d->slave.dev,
+                        "chan %d is running, cannot suspend\n", stat);
+               return -1;
+       }
+       clk_disable_unprepare(d->clk);
+       return 0;
+}
+
+static int zx_dma_resume_dev(struct device *dev)
+{
+       struct zx_dma_dev *d = dev_get_drvdata(dev);
+       int ret = 0;
+
+       ret = clk_prepare_enable(d->clk);
+       if (ret < 0) {
+               dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+               return ret;
+       }
+       zx_dma_init_state(d);
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);
+
+static struct platform_driver zx_pdma_driver = {
+       .driver         = {
+               .name   = DRIVER_NAME,
+               .pm     = &zx_dma_pmops,
+               .of_match_table = zx6702_dma_dt_ids,
+       },
+       .probe          = zx_dma_probe,
+       .remove         = zx_dma_remove,
+};
+
+module_platform_driver(zx_pdma_driver);
+
+MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
+MODULE_AUTHOR("Jun Nie jun.nie@linaro.org");
+MODULE_LICENSE("GPL v2");
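
For background on the driver above: peripheral drivers on this SoC consume its
virtual channels through the generic dmaengine slave API. A minimal,
hypothetical sketch follows; the "tx" channel name, the FIFO address and the
burst/width values are illustrative assumptions, not anything defined by this
patch.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical consumer-side sketch; names and values are illustrative. */
static struct dma_chan *example_request_tx_chan(struct device *dev,
                                                dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,
        };
        struct dma_chan *chan;

        /* "tx" must match a dma-names entry in the consumer's DT node. */
        chan = dma_request_chan(dev, "tx");
        if (IS_ERR(chan))
                return chan;

        if (dmaengine_slave_config(chan, &cfg)) {
                dma_release_channel(chan);
                return ERR_PTR(-EINVAL);
        }

        return chan;
}

The returned channel can then be handed to dmaengine_prep_slave_sg() or
dmaengine_prep_dma_cyclic() and kicked off with dma_async_issue_pending().
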
index 388574e..28e3cf1 100644 (file)
@@ -87,7 +87,7 @@ struct async_submit_ctl {
        void *scribble;
 };
 
-#ifdef CONFIG_DMA_ENGINE
+#if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH)
 #define async_tx_issue_pending_all dma_issue_pending_all
 
 /**
index ccfd0c3..b63b258 100644 (file)
@@ -23,6 +23,7 @@ struct dw_dma;
 /**
  * struct dw_dma_chip - representation of DesignWare DMA controller hardware
  * @dev:               struct device of the DMA controller
+ * @id:                        instance ID
  * @irq:               irq line
  * @regs:              memory mapped I/O space
  * @clk:               hclk clock
@@ -31,6 +32,7 @@ struct dw_dma;
  */
 struct dw_dma_chip {
        struct device   *dev;
+       int             id;
        int             irq;
        void __iomem    *regs;
        struct clk      *clk;
index feee6ec..5336808 100644 (file)
@@ -894,6 +894,17 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
                                                    len, flags);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
+               struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+               size_t len, unsigned long flags)
+{
+       if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
+               return NULL;
+
+       return chan->device->device_prep_dma_memcpy(chan, dest, src,
+                                                   len, flags);
+}
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
                struct dma_chan *chan,
                struct scatterlist *dst_sg, unsigned int dst_nents,
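
The dmaengine_prep_dma_memcpy() helper added above only checks that the
channel supplies a device_prep_dma_memcpy callback before forwarding to it.
As a rough, hypothetical illustration of how a client might use the wrapper
(the function name, the flag choice and the polling loop are assumptions made
for brevity, not part of this series):

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical sketch: offload one copy to any DMA_MEMCPY-capable channel. */
static int example_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        dma_cookie_t cookie;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        chan = dma_request_chan_by_mask(&mask);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
        if (!tx) {
                dma_release_channel(chan);
                return -ENOMEM;
        }

        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);

        /* Busy-wait for completion; real users would use a callback instead. */
        while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
                cpu_relax();

        dma_release_channel(chan);
        return 0;
}

In real code the busy-wait would normally be replaced by setting tx->callback
before dmaengine_submit() and completing from the callback.
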
index e69e415..896cb71 100644 (file)
@@ -41,6 +41,7 @@ struct dw_dma_slave {
  * @is_private: The device channels should be marked as private and not for
 *     use by the general purpose DMA channel allocator.
  * @is_memcpy: The device channels do support memory-to-memory transfers.
+ * @is_idma32: The type of the DMA controller is iDMA32
  * @chan_allocation_order: Allocate channels starting from 0 or 7
  * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
  * @block_size: Maximum block size supported by the controller
@@ -53,6 +54,7 @@ struct dw_dma_platform_data {
        unsigned int    nr_channels;
        bool            is_private;
        bool            is_memcpy;
+       bool            is_idma32;
 #define CHAN_ALLOCATION_ASCENDING      0       /* zero to seven */
 #define CHAN_ALLOCATION_DESCENDING     1       /* seven to zero */
        unsigned char   chan_allocation_order;