Merge branch 'pci/ctrl/dwc-edma'
author    Bjorn Helgaas <bhelgaas@google.com>    Thu, 4 Aug 2022 16:41:54 +0000 (11:41 -0500)
committer Bjorn Helgaas <bhelgaas@google.com>    Thu, 4 Aug 2022 16:41:54 +0000 (11:41 -0500)

- Remove unused struct dw_edma_chip.irq (Frank Li)

- Move eDMA private data from struct dw_edma to struct dw_edma_chip, as
  illustrated by the registration sketch after this list (Frank Li)

- Convert "struct dw_edma_region rg_region" to "void __iomem *reg_base"
  since only the virtual address (not physical address or size) is used
  (Frank Li)

- Rename "*_ch_cnt" to "ll_*_cnt" to reflect actual usage (Frank Li)

- Drop dma_slave_config.direction field usage (Serge Semin)

- Fix eDMA Rd/Wr-channels and DMA-direction semantics (Serge Semin)

- Add chip-specific DW_EDMA_CHIP_LOCAL flag to indicate that a locally
  controlled eDMA doesn't need to generate MSIs to the remote side (Frank Li)

- Enable DMA tests for endpoints with DMA capabilities (Frank Li)
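
The reworked interface is easiest to see from the glue-driver side: the
driver fills in struct dw_edma_chip and calls dw_edma_probe(), which now
allocates the private struct dw_edma itself.  Below is a minimal sketch of
such a registration; it is not code from this series, and
my_edma_register(), my_edma_irq_vector(), the single-IRQ setup and the
4 KiB link-list sizes are illustrative assumptions.

/*
 * Hypothetical glue-driver sketch (not from this series): describe the
 * embedded eDMA through the reworked struct dw_edma_chip and hand it to
 * dw_edma_probe().  The names, IRQ mapping and region sizes here are
 * assumptions made only for illustration.
 */
#include <linux/dma/edma.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int my_edma_irq_vector(struct device *dev, unsigned int nr)
{
        /* Map eDMA interrupt 'nr' to a Linux IRQ number. */
        return platform_get_irq(to_platform_device(dev), nr);
}

static const struct dw_edma_core_ops my_edma_ops = {
        .irq_vector = my_edma_irq_vector,
};

static int my_edma_register(struct device *dev, void __iomem *regs,
                            phys_addr_t ll_phys, void __iomem *ll_virt)
{
        struct dw_edma_chip *chip;

        chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->dev = dev;
        chip->nr_irqs = 1;                /* replaces the removed 'irq' field */
        chip->ops = &my_edma_ops;
        chip->flags = DW_EDMA_CHIP_LOCAL; /* local CPU drives the eDMA; no MSIs to the remote side */
        chip->mf = EDMA_MF_EDMA_UNROLL;
        chip->reg_base = regs;            /* was rg_region; only the virtual address is kept */

        chip->ll_wr_cnt = 1;              /* was wr_ch_cnt/rd_ch_cnt */
        chip->ll_rd_cnt = 1;
        chip->ll_region_wr[0].paddr = ll_phys;
        chip->ll_region_wr[0].vaddr = ll_virt;
        chip->ll_region_wr[0].sz = SZ_4K;
        chip->ll_region_rd[0].paddr = ll_phys + SZ_4K;
        chip->ll_region_rd[0].vaddr = ll_virt + SZ_4K;
        chip->ll_region_rd[0].sz = SZ_4K;

        /* struct dw_edma is now allocated inside dw_edma_probe() and stored in chip->dw */
        return dw_edma_probe(chip);
}

Splitting the static chip description from the run-time state this way lets
the glue driver own only struct dw_edma_chip, while the core allocates and
owns struct dw_edma during probe.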

* pci/ctrl/dwc-edma:
  PCI: endpoint: Enable DMA tests for endpoints with DMA capabilities
  dmaengine: dw-edma: Add support for chip-specific flags
  dmaengine: dw-edma: Fix eDMA Rd/Wr-channels and DMA-direction semantics
  dmaengine: dw-edma: Drop dma_slave_config.direction field usage
  dmaengine: dw-edma: Rename wr(rd)_ch_cnt to ll_wr(rd)_cnt in struct dw_edma_chip
  dmaengine: dw-edma: Change rg_region to reg_base in struct dw_edma_chip
  dmaengine: dw-edma: Detach the private data and chip info structures
  dmaengine: dw-edma: Remove unused irq field in struct dw_edma_chip

drivers/dma/dw-edma/dw-edma-core.c
drivers/dma/dw-edma/dw-edma-core.h
drivers/dma/dw-edma/dw-edma-pcie.c
drivers/dma/dw-edma/dw-edma-v0-core.c
drivers/dma/dw-edma/dw-edma-v0-core.h
drivers/dma/dw-edma/dw-edma-v0-debugfs.c
drivers/dma/dw-edma/dw-edma-v0-debugfs.h
drivers/pci/endpoint/functions/pci-epf-test.c
include/linux/dma/edma.h

diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 468d109..07f7564 100644
@@ -64,8 +64,8 @@ static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
 
 static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
 {
+       struct dw_edma_chip *chip = desc->chan->dw->chip;
        struct dw_edma_chan *chan = desc->chan;
-       struct dw_edma *dw = chan->chip->dw;
        struct dw_edma_chunk *chunk;
 
        chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
@@ -82,11 +82,11 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
         */
        chunk->cb = !(desc->chunks_alloc % 2);
        if (chan->dir == EDMA_DIR_WRITE) {
-               chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
-               chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
+               chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
+               chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
        } else {
-               chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
-               chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
+               chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
+               chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
        }
 
        if (desc->chunk) {
@@ -339,21 +339,40 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
        if (!chan->configured)
                return NULL;
 
-       switch (chan->config.direction) {
-       case DMA_DEV_TO_MEM: /* local DMA */
-               if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
-                       break;
-               return NULL;
-       case DMA_MEM_TO_DEV: /* local DMA */
-               if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
-                       break;
-               return NULL;
-       default: /* remote DMA */
-               if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
-                       break;
-               if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
-                       break;
-               return NULL;
+       /*
+        * Local Root Port/End-point              Remote End-point
+        * +-----------------------+ PCIe bus +----------------------+
+        * |                       |    +-+   |                      |
+        * |    DEV_TO_MEM   Rx Ch <----+ +---+ Tx Ch  DEV_TO_MEM    |
+        * |                       |    | |   |                      |
+        * |    MEM_TO_DEV   Tx Ch +----+ +---> Rx Ch  MEM_TO_DEV    |
+        * |                       |    +-+   |                      |
+        * +-----------------------+          +----------------------+
+        *
+        * 1. Normal logic:
+        * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
+        * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
+        * for the device read operations (DEV_TO_MEM) and the Tx channel
+        * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV).
+        *
+        * 2. Inverted logic:
+        * If eDMA is embedded into a Remote PCIe EP and is controlled by the
+        * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
+        * channel (EDMA_DIR_WRITE) will be used for the device read operations
+        * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write
+        * operations (MEM_TO_DEV).
+        *
+        * It is the client driver responsibility to choose a proper channel
+        * for the DMA transfers.
+        */
+       if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+               if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
+                   (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
+                       return NULL;
+       } else {
+               if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
+                   (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
+                       return NULL;
        }
 
        if (xfer->type == EDMA_XFER_CYCLIC) {
@@ -423,7 +442,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
                chunk->ll_region.sz += burst->sz;
                desc->alloc_sz += burst->sz;
 
-               if (chan->dir == EDMA_DIR_WRITE) {
+               if (dir == DMA_DEV_TO_MEM) {
                        burst->sar = src_addr;
                        if (xfer->type == EDMA_XFER_CYCLIC) {
                                burst->dar = xfer->xfer.cyclic.paddr;
@@ -663,7 +682,7 @@ static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
        if (chan->status != EDMA_ST_IDLE)
                return -EBUSY;
 
-       pm_runtime_get(chan->chip->dev);
+       pm_runtime_get(chan->dw->chip->dev);
 
        return 0;
 }
@@ -685,15 +704,15 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
                cpu_relax();
        }
 
-       pm_runtime_put(chan->chip->dev);
+       pm_runtime_put(chan->dw->chip->dev);
 }
 
-static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
+static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
                                 u32 wr_alloc, u32 rd_alloc)
 {
+       struct dw_edma_chip *chip = dw->chip;
        struct dw_edma_region *dt_region;
        struct device *dev = chip->dev;
-       struct dw_edma *dw = chip->dw;
        struct dw_edma_chan *chan;
        struct dw_edma_irq *irq;
        struct dma_device *dma;
@@ -726,7 +745,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
 
                chan->vc.chan.private = dt_region;
 
-               chan->chip = chip;
+               chan->dw = dw;
                chan->id = j;
                chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
                chan->configured = false;
@@ -734,9 +753,9 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
                chan->status = EDMA_ST_IDLE;
 
                if (write)
-                       chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
+                       chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
                else
-                       chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
+                       chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
                chan->ll_max -= 1;
 
                dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
@@ -766,13 +785,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
                vchan_init(&chan->vc, dma);
 
                if (write) {
-                       dt_region->paddr = dw->dt_region_wr[j].paddr;
-                       dt_region->vaddr = dw->dt_region_wr[j].vaddr;
-                       dt_region->sz = dw->dt_region_wr[j].sz;
+                       dt_region->paddr = chip->dt_region_wr[j].paddr;
+                       dt_region->vaddr = chip->dt_region_wr[j].vaddr;
+                       dt_region->sz = chip->dt_region_wr[j].sz;
                } else {
-                       dt_region->paddr = dw->dt_region_rd[j].paddr;
-                       dt_region->vaddr = dw->dt_region_rd[j].vaddr;
-                       dt_region->sz = dw->dt_region_rd[j].sz;
+                       dt_region->paddr = chip->dt_region_rd[j].paddr;
+                       dt_region->vaddr = chip->dt_region_rd[j].vaddr;
+                       dt_region->sz = chip->dt_region_rd[j].sz;
                }
 
                dw_edma_v0_core_device_config(chan);
@@ -826,11 +845,11 @@ static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
                (*mask)++;
 }
 
-static int dw_edma_irq_request(struct dw_edma_chip *chip,
+static int dw_edma_irq_request(struct dw_edma *dw,
                               u32 *wr_alloc, u32 *rd_alloc)
 {
-       struct device *dev = chip->dev;
-       struct dw_edma *dw = chip->dw;
+       struct dw_edma_chip *chip = dw->chip;
+       struct device *dev = dw->chip->dev;
        u32 wr_mask = 1;
        u32 rd_mask = 1;
        int i, err = 0;
@@ -839,12 +858,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
 
        ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
 
-       if (dw->nr_irqs < 1)
+       if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
                return -EINVAL;
 
-       if (dw->nr_irqs == 1) {
+       dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
+       if (!dw->irq)
+               return -ENOMEM;
+
+       if (chip->nr_irqs == 1) {
                /* Common IRQ shared among all channels */
-               irq = dw->ops->irq_vector(dev, 0);
+               irq = chip->ops->irq_vector(dev, 0);
                err = request_irq(irq, dw_edma_interrupt_common,
                                  IRQF_SHARED, dw->name, &dw->irq[0]);
                if (err) {
@@ -854,9 +877,11 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
 
                if (irq_get_msi_desc(irq))
                        get_cached_msi_msg(irq, &dw->irq[0].msi);
+
+               dw->nr_irqs = 1;
        } else {
                /* Distribute IRQs equally among all channels */
-               int tmp = dw->nr_irqs;
+               int tmp = chip->nr_irqs;
 
                while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
                        dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
@@ -867,7 +892,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
                dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
 
                for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
-                       irq = dw->ops->irq_vector(dev, i);
+                       irq = chip->ops->irq_vector(dev, i);
                        err = request_irq(irq,
                                          i < *wr_alloc ?
                                                dw_edma_interrupt_write :
@@ -901,20 +926,22 @@ int dw_edma_probe(struct dw_edma_chip *chip)
                return -EINVAL;
 
        dev = chip->dev;
-       if (!dev)
+       if (!dev || !chip->ops)
                return -EINVAL;
 
-       dw = chip->dw;
-       if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
-               return -EINVAL;
+       dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
+       if (!dw)
+               return -ENOMEM;
+
+       dw->chip = chip;
 
        raw_spin_lock_init(&dw->lock);
 
-       dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
+       dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
                              dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
        dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
 
-       dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
+       dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
                              dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
        dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
 
@@ -936,17 +963,17 @@ int dw_edma_probe(struct dw_edma_chip *chip)
        dw_edma_v0_core_off(dw);
 
        /* Request IRQs */
-       err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
+       err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
        if (err)
                return err;
 
        /* Setup write channels */
-       err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
+       err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
        if (err)
                goto err_irq_free;
 
        /* Setup read channels */
-       err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
+       err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
        if (err)
                goto err_irq_free;
 
@@ -954,15 +981,15 @@ int dw_edma_probe(struct dw_edma_chip *chip)
        pm_runtime_enable(dev);
 
        /* Turn debugfs on */
-       dw_edma_v0_core_debugfs_on(chip);
+       dw_edma_v0_core_debugfs_on(dw);
+
+       chip->dw = dw;
 
        return 0;
 
 err_irq_free:
        for (i = (dw->nr_irqs - 1); i >= 0; i--)
-               free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
-
-       dw->nr_irqs = 0;
+               free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
 
        return err;
 }
@@ -980,7 +1007,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
 
        /* Free irqs */
        for (i = (dw->nr_irqs - 1); i >= 0; i--)
-               free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
+               free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
 
        /* Power management */
        pm_runtime_disable(dev);
@@ -1001,7 +1028,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
        }
 
        /* Turn debugfs off */
-       dw_edma_v0_core_debugfs_off(chip);
+       dw_edma_v0_core_debugfs_off(dw);
 
        return 0;
 }
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 60316d4..85df2d5 100644
 #include "../virt-dma.h"
 
 #define EDMA_LL_SZ                                     24
-#define EDMA_MAX_WR_CH                                 8
-#define EDMA_MAX_RD_CH                                 8
 
 enum dw_edma_dir {
        EDMA_DIR_WRITE = 0,
        EDMA_DIR_READ
 };
 
-enum dw_edma_map_format {
-       EDMA_MF_EDMA_LEGACY = 0x0,
-       EDMA_MF_EDMA_UNROLL = 0x1,
-       EDMA_MF_HDMA_COMPAT = 0x5
-};
-
 enum dw_edma_request {
        EDMA_REQ_NONE = 0,
        EDMA_REQ_STOP,
@@ -57,12 +49,6 @@ struct dw_edma_burst {
        u32                             sz;
 };
 
-struct dw_edma_region {
-       phys_addr_t                     paddr;
-       void                            __iomem *vaddr;
-       size_t                          sz;
-};
-
 struct dw_edma_chunk {
        struct list_head                list;
        struct dw_edma_chan             *chan;
@@ -87,7 +73,7 @@ struct dw_edma_desc {
 
 struct dw_edma_chan {
        struct virt_dma_chan            vc;
-       struct dw_edma_chip             *chip;
+       struct dw_edma                  *dw;
        int                             id;
        enum dw_edma_dir                dir;
 
@@ -109,10 +95,6 @@ struct dw_edma_irq {
        struct dw_edma                  *dw;
 };
 
-struct dw_edma_core_ops {
-       int     (*irq_vector)(struct device *dev, unsigned int nr);
-};
-
 struct dw_edma {
        char                            name[20];
 
@@ -122,21 +104,14 @@ struct dw_edma {
        struct dma_device               rd_edma;
        u16                             rd_ch_cnt;
 
-       struct dw_edma_region           rg_region;      /* Registers */
-       struct dw_edma_region           ll_region_wr[EDMA_MAX_WR_CH];
-       struct dw_edma_region           ll_region_rd[EDMA_MAX_RD_CH];
-       struct dw_edma_region           dt_region_wr[EDMA_MAX_WR_CH];
-       struct dw_edma_region           dt_region_rd[EDMA_MAX_RD_CH];
-
        struct dw_edma_irq              *irq;
        int                             nr_irqs;
 
-       enum dw_edma_map_format         mf;
-
        struct dw_edma_chan             *chan;
-       const struct dw_edma_core_ops   *ops;
 
        raw_spinlock_t                  lock;           /* Only for legacy */
+
+       struct dw_edma_chip             *chip;
 #ifdef CONFIG_DEBUG_FS
        struct dentry                   *debugfs;
 #endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index cee7aa2..d6b5e24 100644
@@ -148,7 +148,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
        struct dw_edma_pcie_data vsec_data;
        struct device *dev = &pdev->dev;
        struct dw_edma_chip *chip;
-       struct dw_edma *dw;
        int err, nr_irqs;
        int i, mask;
 
@@ -197,10 +196,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
        if (!chip)
                return -ENOMEM;
 
-       dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
-       if (!dw)
-               return -ENOMEM;
-
        /* IRQs allocation */
        nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
                                        PCI_IRQ_MSI | PCI_IRQ_MSIX);
@@ -211,29 +206,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
        }
 
        /* Data structure initialization */
-       chip->dw = dw;
        chip->dev = dev;
        chip->id = pdev->devfn;
-       chip->irq = pdev->irq;
 
-       dw->mf = vsec_data.mf;
-       dw->nr_irqs = nr_irqs;
-       dw->ops = &dw_edma_pcie_core_ops;
-       dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
-       dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
+       chip->mf = vsec_data.mf;
+       chip->nr_irqs = nr_irqs;
+       chip->ops = &dw_edma_pcie_core_ops;
 
-       dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
-       if (!dw->rg_region.vaddr)
-               return -ENOMEM;
+       chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
+       chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
 
-       dw->rg_region.vaddr += vsec_data.rg.off;
-       dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
-       dw->rg_region.paddr += vsec_data.rg.off;
-       dw->rg_region.sz = vsec_data.rg.sz;
+       chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+       if (!chip->reg_base)
+               return -ENOMEM;
 
-       for (i = 0; i < dw->wr_ch_cnt; i++) {
-               struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
-               struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
+       for (i = 0; i < chip->ll_wr_cnt; i++) {
+               struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
+               struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
                struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
                struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
 
@@ -256,9 +245,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
                dt_region->sz = dt_block->sz;
        }
 
-       for (i = 0; i < dw->rd_ch_cnt; i++) {
-               struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
-               struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
+       for (i = 0; i < chip->ll_rd_cnt; i++) {
+               struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
+               struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
                struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
                struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
 
@@ -282,45 +271,45 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
        }
 
        /* Debug info */
-       if (dw->mf == EDMA_MF_EDMA_LEGACY)
-               pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
-       else if (dw->mf == EDMA_MF_EDMA_UNROLL)
-               pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
-       else if (dw->mf == EDMA_MF_HDMA_COMPAT)
-               pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
+       if (chip->mf == EDMA_MF_EDMA_LEGACY)
+               pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", chip->mf);
+       else if (chip->mf == EDMA_MF_EDMA_UNROLL)
+               pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
+       else if (chip->mf == EDMA_MF_HDMA_COMPAT)
+               pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
        else
-               pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
+               pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
 
-       pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+       pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
                vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
-               dw->rg_region.vaddr, &dw->rg_region.paddr);
+               chip->reg_base);
 
 
-       for (i = 0; i < dw->wr_ch_cnt; i++) {
+       for (i = 0; i < chip->ll_wr_cnt; i++) {
                pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                        i, vsec_data.ll_wr[i].bar,
-                       vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
-                       dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
+                       vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+                       chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
 
                pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                        i, vsec_data.dt_wr[i].bar,
-                       vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
-                       dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
+                       vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+                       chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
        }
 
-       for (i = 0; i < dw->rd_ch_cnt; i++) {
+       for (i = 0; i < chip->ll_rd_cnt; i++) {
                pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                        i, vsec_data.ll_rd[i].bar,
-                       vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
-                       dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
+                       vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+                       chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
 
                pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                        i, vsec_data.dt_rd[i].bar,
-                       vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
-                       dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
+                       vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+                       chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
        }
 
-       pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
+       pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);
 
        /* Validating if PCI interrupts were enabled */
        if (!pci_dev_msi_enabled(pdev)) {
@@ -328,10 +317,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
                return -EPERM;
        }
 
-       dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
-       if (!dw->irq)
-               return -ENOMEM;
-
        /* Starting eDMA driver */
        err = dw_edma_probe(chip);
        if (err) {
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 33bc1e6..607647d 100644
@@ -25,7 +25,7 @@ enum dw_edma_control {
 
 static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
 {
-       return dw->rg_region.vaddr;
+       return dw->chip->reg_base;
 }
 
 #define SET_32(dw, name, value)                                \
@@ -96,7 +96,7 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
 static inline struct dw_edma_v0_ch_regs __iomem *
 __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
 {
-       if (dw->mf == EDMA_MF_EDMA_LEGACY)
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
                return &(__dw_regs(dw)->type.legacy.ch);
 
        if (dir == EDMA_DIR_WRITE)
@@ -108,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
 static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
                             u32 value, void __iomem *addr)
 {
-       if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;
 
@@ -133,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
 {
        u32 value;
 
-       if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;
 
@@ -169,7 +169,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
 static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
                             u64 value, void __iomem *addr)
 {
-       if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;
 
@@ -194,7 +194,7 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
 {
        u32 value;
 
-       if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;
 
@@ -256,7 +256,7 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
 
 enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
 {
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
        u32 tmp;
 
        tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
@@ -272,7 +272,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
 
 void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
 {
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
 
        SET_RW_32(dw, chan->dir, int_clear,
                  FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
@@ -280,7 +280,7 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
 
 void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
 {
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
 
        SET_RW_32(dw, chan->dir, int_clear,
                  FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
@@ -301,6 +301,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
 static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 {
        struct dw_edma_burst *child;
+       struct dw_edma_chan *chan = chunk->chan;
        struct dw_edma_v0_lli __iomem *lli;
        struct dw_edma_v0_llp __iomem *llp;
        u32 control = 0, i = 0;
@@ -314,9 +315,11 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
        j = chunk->bursts_alloc;
        list_for_each_entry(child, &chunk->burst->list, list) {
                j--;
-               if (!j)
-                       control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);
-
+               if (!j) {
+                       control |= DW_EDMA_V0_LIE;
+                       if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+                               control |= DW_EDMA_V0_RIE;
+               }
                /* Channel control */
                SET_LL_32(&lli[i].control, control);
                /* Transfer size */
@@ -357,7 +360,7 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
        struct dw_edma_chan *chan = chunk->chan;
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
        u32 tmp;
 
        dw_edma_v0_core_write_chunk(chunk);
@@ -365,7 +368,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
        if (first) {
                /* Enable engine */
                SET_RW_32(dw, chan->dir, engine_en, BIT(0));
-               if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+               if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
                        switch (chan->id) {
                        case 0:
                                SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
@@ -435,7 +438,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 
 int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
 {
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
        u32 tmp = 0;
 
        /* MSI done addr - low, high */
@@ -505,12 +508,12 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
 }
 
 /* eDMA debugfs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
 {
-       dw_edma_v0_debugfs_on(chip);
+       dw_edma_v0_debugfs_on(dw);
 }
 
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
 {
-       dw_edma_v0_debugfs_off(chip);
+       dw_edma_v0_debugfs_off(dw);
 }
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index 2afa626..75aec6d 100644
@@ -22,7 +22,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir)
 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
 int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
 /* eDMA debug fs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
 
 #endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 4b3bcff..5226c90 100644
@@ -54,7 +54,7 @@ struct debugfs_entries {
 static int dw_edma_debugfs_u32_get(void *data, u64 *val)
 {
        void __iomem *reg = (void __force __iomem *)data;
-       if (dw->mf == EDMA_MF_EDMA_LEGACY &&
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
            reg >= (void __iomem *)&regs->type.legacy.ch) {
                void __iomem *ptr = &regs->type.legacy.ch;
                u32 viewport_sel = 0;
@@ -173,7 +173,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
        nr_entries = ARRAY_SIZE(debugfs_regs);
        dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
 
-       if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+       if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
                nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
                dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
                                           regs_dir);
@@ -242,7 +242,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
        nr_entries = ARRAY_SIZE(debugfs_regs);
        dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
 
-       if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+       if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
                nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
                dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
                                           regs_dir);
@@ -282,13 +282,13 @@ static void dw_edma_debugfs_regs(void)
        dw_edma_debugfs_regs_rd(regs_dir);
 }
 
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
 {
-       dw = chip->dw;
+       dw = _dw;
        if (!dw)
                return;
 
-       regs = dw->rg_region.vaddr;
+       regs = dw->chip->reg_base;
        if (!regs)
                return;
 
@@ -296,16 +296,16 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
        if (!dw->debugfs)
                return;
 
-       debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf);
+       debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
        debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
        debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
 
        dw_edma_debugfs_regs();
 }
 
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
 {
-       dw = chip->dw;
+       dw = _dw;
        if (!dw)
                return;
 
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index d0ff25a..3391b86 100644
 #include <linux/dma/edma.h>
 
 #ifdef CONFIG_DEBUG_FS
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_debugfs_off(struct dw_edma *dw);
 #else
-static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
 {
 }
 
-static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
 {
 }
 #endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index a5ed779..36b1801 100644
@@ -52,9 +52,11 @@ struct pci_epf_test {
        enum pci_barno          test_reg_bar;
        size_t                  msix_table_offset;
        struct delayed_work     cmd_handler;
-       struct dma_chan         *dma_chan;
+       struct dma_chan         *dma_chan_tx;
+       struct dma_chan         *dma_chan_rx;
        struct completion       transfer_complete;
        bool                    dma_supported;
+       bool                    dma_private;
        const struct pci_epc_features *epc_features;
 };
 
@@ -96,6 +98,8 @@ static void pci_epf_test_dma_callback(void *param)
  * @dma_src: The source address of the data transfer. It can be a physical
  *          address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
  * @len: The size of the data transfer
+ * @dma_remote: remote RC physical address
+ * @dir: DMA transfer direction
  *
  * Function that uses dmaengine API to transfer data between PCIe EP and remote
  * PCIe RC. The source and destination address can be a physical address given
@@ -105,12 +109,16 @@ static void pci_epf_test_dma_callback(void *param)
  */
 static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
                                      dma_addr_t dma_dst, dma_addr_t dma_src,
-                                     size_t len)
+                                     size_t len, dma_addr_t dma_remote,
+                                     enum dma_transfer_direction dir)
 {
+       struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ?
+                                epf_test->dma_chan_tx : epf_test->dma_chan_rx;
+       dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
        enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-       struct dma_chan *chan = epf_test->dma_chan;
        struct pci_epf *epf = epf_test->epf;
        struct dma_async_tx_descriptor *tx;
+       struct dma_slave_config sconf = {};
        struct device *dev = &epf->dev;
        dma_cookie_t cookie;
        int ret;
@@ -120,7 +128,24 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
                return -EINVAL;
        }
 
-       tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
+       if (epf_test->dma_private) {
+               sconf.direction = dir;
+               if (dir == DMA_MEM_TO_DEV)
+                       sconf.dst_addr = dma_remote;
+               else
+                       sconf.src_addr = dma_remote;
+
+               if (dmaengine_slave_config(chan, &sconf)) {
+                       dev_err(dev, "DMA slave config fail\n");
+                       return -EIO;
+               }
+               tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
+                                                flags);
+       } else {
+               tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
+                                              flags);
+       }
+
        if (!tx) {
                dev_err(dev, "Failed to prepare DMA memcpy\n");
                return -EIO;
@@ -148,6 +173,23 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
        return 0;
 }
 
+struct epf_dma_filter {
+       struct device *dev;
+       u32 dma_mask;
+};
+
+static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+       struct epf_dma_filter *filter = node;
+       struct dma_slave_caps caps;
+
+       memset(&caps, 0, sizeof(caps));
+       dma_get_slave_caps(chan, &caps);
+
+       return chan->device->dev == filter->dev
+               && (filter->dma_mask & caps.directions);
+}
+
 /**
  * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
  * @epf_test: the EPF test device that performs data transfer operation
@@ -158,10 +200,44 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
 {
        struct pci_epf *epf = epf_test->epf;
        struct device *dev = &epf->dev;
+       struct epf_dma_filter filter;
        struct dma_chan *dma_chan;
        dma_cap_mask_t mask;
        int ret;
 
+       filter.dev = epf->epc->dev.parent;
+       filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+       if (!dma_chan) {
+               dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
+               goto fail_back_tx;
+       }
+
+       epf_test->dma_chan_rx = dma_chan;
+
+       filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+       dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+
+       if (!dma_chan) {
+               dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
+               goto fail_back_rx;
+       }
+
+       epf_test->dma_chan_tx = dma_chan;
+       epf_test->dma_private = true;
+
+       init_completion(&epf_test->transfer_complete);
+
+       return 0;
+
+fail_back_rx:
+       dma_release_channel(epf_test->dma_chan_rx);
+       epf_test->dma_chan_tx = NULL;
+
+fail_back_tx:
        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
 
@@ -174,7 +250,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
        }
        init_completion(&epf_test->transfer_complete);
 
-       epf_test->dma_chan = dma_chan;
+       epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
 
        return 0;
 }
@@ -190,8 +266,17 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
        if (!epf_test->dma_supported)
                return;
 
-       dma_release_channel(epf_test->dma_chan);
-       epf_test->dma_chan = NULL;
+       dma_release_channel(epf_test->dma_chan_tx);
+       if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+               epf_test->dma_chan_tx = NULL;
+               epf_test->dma_chan_rx = NULL;
+               return;
+       }
+
+       dma_release_channel(epf_test->dma_chan_rx);
+       epf_test->dma_chan_rx = NULL;
+
+       return;
 }
 
 static void pci_epf_test_print_rate(const char *ops, u64 size,
@@ -280,8 +365,15 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
                        goto err_map_addr;
                }
 
+               if (epf_test->dma_private) {
+                       dev_err(dev, "Cannot transfer data using DMA\n");
+                       ret = -EINVAL;
+                       goto err_map_addr;
+               }
+
                ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
-                                                src_phys_addr, reg->size);
+                                                src_phys_addr, reg->size, 0,
+                                                DMA_MEM_TO_MEM);
                if (ret)
                        dev_err(dev, "Data transfer failed\n");
        } else {
@@ -373,7 +465,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
 
                ktime_get_ts64(&start);
                ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
-                                                phys_addr, reg->size);
+                                                phys_addr, reg->size,
+                                                reg->src_addr, DMA_DEV_TO_MEM);
                if (ret)
                        dev_err(dev, "Data transfer failed\n");
                ktime_get_ts64(&end);
@@ -463,8 +556,11 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
                }
 
                ktime_get_ts64(&start);
+
                ret = pci_epf_test_data_transfer(epf_test, phys_addr,
-                                                src_phys_addr, reg->size);
+                                                src_phys_addr, reg->size,
+                                                reg->dst_addr,
+                                                DMA_MEM_TO_DEV);
                if (ret)
                        dev_err(dev, "Data transfer failed\n");
                ktime_get_ts64(&end);
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
index cab6e18..7d8062e 100644
 #include <linux/device.h>
 #include <linux/dmaengine.h>
 
+#define EDMA_MAX_WR_CH                                  8
+#define EDMA_MAX_RD_CH                                  8
+
 struct dw_edma;
 
+struct dw_edma_region {
+       phys_addr_t     paddr;
+       void __iomem    *vaddr;
+       size_t          sz;
+};
+
+struct dw_edma_core_ops {
+       int (*irq_vector)(struct device *dev, unsigned int nr);
+};
+
+enum dw_edma_map_format {
+       EDMA_MF_EDMA_LEGACY = 0x0,
+       EDMA_MF_EDMA_UNROLL = 0x1,
+       EDMA_MF_HDMA_COMPAT = 0x5
+};
+
+/**
+ * enum dw_edma_chip_flags - Flags specific to an eDMA chip
+ * @DW_EDMA_CHIP_LOCAL:                eDMA is used locally by an endpoint
+ */
+enum dw_edma_chip_flags {
+       DW_EDMA_CHIP_LOCAL      = BIT(0),
+};
+
 /**
  * struct dw_edma_chip - representation of DesignWare eDMA controller hardware
  * @dev:                struct device of the eDMA controller
  * @id:                         instance ID
- * @irq:                irq line
- * @dw:                         struct dw_edma that is filed by dw_edma_probe()
+ * @nr_irqs:            total number of DMA IRQs
+ * @ops:                DMA channel to IRQ number mapping
+ * @flags:              dw_edma_chip_flags
+ * @reg_base:           DMA register base address
+ * @ll_wr_cnt:          DMA write link list count
+ * @ll_rd_cnt:          DMA read link list count
+ * @ll_region_wr:       DMA descriptor link list memory for write channel
+ * @ll_region_rd:       DMA descriptor link list memory for read channel
+ * @dt_region_wr:       DMA data memory for write channel
+ * @dt_region_rd:       DMA data memory for read channel
+ * @mf:                 DMA register map format
+ * @dw:                         struct dw_edma that is filled by dw_edma_probe()
  */
 struct dw_edma_chip {
        struct device           *dev;
        int                     id;
-       int                     irq;
+       int                     nr_irqs;
+       const struct dw_edma_core_ops   *ops;
+       u32                     flags;
+
+       void __iomem            *reg_base;
+
+       u16                     ll_wr_cnt;
+       u16                     ll_rd_cnt;
+       /* link list address */
+       struct dw_edma_region   ll_region_wr[EDMA_MAX_WR_CH];
+       struct dw_edma_region   ll_region_rd[EDMA_MAX_RD_CH];
+
+       /* data region */
+       struct dw_edma_region   dt_region_wr[EDMA_MAX_WR_CH];
+       struct dw_edma_region   dt_region_rd[EDMA_MAX_RD_CH];
+
+       enum dw_edma_map_format mf;
+
        struct dw_edma          *dw;
 };