/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR		0x0204
#define DTADR		0x0208
#define DCMD		0x020c
#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */
#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
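/*
 * Illustration only (not part of the original source): the prep/config
 * paths below compose a DCMD word from the bits above. A 4-byte-wide,
 * 32-byte-burst, memory-to-device descriptor of len bytes would use
 *
 *	dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_WIDTH4 | DCMD_BURST32;
 *	dcmd |= (DCMD_LENGTH & len);	(len capped at PDMA_MAX_DESC_BYTES)
 *
 * and a longer transfer is split across as many descriptors as needed.
 */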
struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);
struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};
struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state */

	struct dma_pool *desc_pool;	/* Descriptors pool */
};
struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};
struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock; /* protect alloc/free phy channels */
};
#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}
static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (phy) {
		reg = (phy->idx << 2) + DCSR;
		writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
	}
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}
	return -EAGAIN;
}
static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}
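/*
 * Example of the priority mapping above (illustration only): channel 21
 * has priority (21 & 0xf) >> 2 = 1, so lookup_phy() hands it out only
 * after every priority-0 channel (0-3, 16-19) was found busy.
 */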
static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->phy->vchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
/* desc->tx_list ==> pending list */
static void append_pending_queue(struct mmp_pdma_chan *chan,
				 struct mmp_pdma_desc_sw *desc)
{
	struct mmp_pdma_desc_sw *tail =
				to_mmp_pdma_desc(chan->chain_pending.prev);

	if (list_empty(&chan->chain_pending))
		goto out_splice;

	/* one irq per queue, even appended */
	tail->desc.ddadr = desc->async_tx.phys;
	tail->desc.dcmd &= ~DCMD_ENDIRQEN;

	/* softly link to pending list */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
}
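/*
 * Illustration of append_pending_queue() above: with descriptors A->B
 * already pending and C->D being appended, the hardware chain becomes
 *
 *	A -> B -> C -> D (DCMD_ENDIRQEN | DDADR_STOP)
 *
 * B's ddadr now points at C's physical address and B's DCMD_ENDIRQEN is
 * cleared, so the whole merged queue completes with a single irq.
 */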
/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}
/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	append_pending_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}
/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 * @dchan: DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 * The irq is requested only when the channel is actually requested.
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool =
		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
				sizeof(struct mmp_pdma_desc_sw),
				__alignof__(struct mmp_pdma_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}
static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}
static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
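/*
 * Example client usage (sketch, not taken from this driver): a memcpy
 * request goes through the generic dmaengine flow and lands in the
 * functions above.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = tx->tx_submit(tx);		resolves to mmp_pdma_tx_submit()
 *	dma_async_issue_pending(chan);		resolves to mmp_pdma_issue_pending()
 *
 * "chan", "dst", "src" and "len" are assumed to have been obtained
 * elsewhere (e.g. via dma_request_channel() and dma_map_single()).
 */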
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
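/*
 * Example client usage (sketch, not taken from this driver): a peripheral
 * driver configures the channel, then prepares and submits an sg list.
 * dev_fifo_phys, sgl and sg_len are hypothetical client-side values.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= dev_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 32,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	ends up in mmp_pdma_control()
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */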
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	int ret = 0;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->dev_addr = addr;
		/* FIXME: drivers should be ported over to use the filter
		 * function. Once that's done, the following two lines can
		 * be removed.
		 */
		if (cfg->slave_id)
			chan->drcmr = cfg->slave_id;
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}
/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	/* submit pending list; callback for each desc; free desc */

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->chain_running)) {
		dma_cookie_t cookie;

		desc = to_mmp_pdma_desc(chan->chain_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->chain_running, &chain_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}
static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}
static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
			      int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev,
			    sizeof(struct mmp_pdma_chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_chan_handler, IRQF_DISABLED,
				       "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}
static const struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;
	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);
	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node,
				     "#dma-channels", &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channels */
	pdev->dma_channels = dma_channels;
	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kzalloc(pdev->dev,
				 dma_channels * sizeof(struct mmp_pdma_phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);
	if (irq_num != dma_channels) {
		/* all channels share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_int_handler, IRQF_DISABLED,
				       "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}
	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;
	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized\n");
	return 0;
}
static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner	= THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};
bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
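/*
 * Example (sketch, not taken from this driver): binding a specific DRCMR
 * request line from a client driver via the filter above.
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr = 47;	hypothetical request line number
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
 */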
module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");