// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * Split the 64-bit write into two 32-bit writes, as some hardware
	 * doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}
static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}
static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}
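/*
 * Note: DMAC_CHEN pairs every per-channel control bit with a write-enable
 * bit (DMAC_CHAN_EN_WE_SHIFT and friends below). A write only takes effect
 * for channels whose WE bit is set in the same access, which lets the
 * helpers below update one channel without disturbing the others.
 */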
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
	val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
	       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
}
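/*
 * The widest usable transfer width is bounded by the lowest set bit across
 * the source address, destination address and length (each must be naturally
 * aligned to the width), capped by the master data bus width. For example,
 * src = 0x1004, dst = 0x2008, len = 0x40 yields __ffs(...) = 2, i.e. 4-byte
 * transfers.
 */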
static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}
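/*
 * Descriptor bookkeeping: one axi_dma_desc (the virtual descriptor handed to
 * the dmaengine core) owns an array of hardware LLIs carved out of the
 * channel's DMA pool. descs_allocated counts pool entries per channel and is
 * used for debug output and the residue math in dma_chan_tx_status().
 */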
static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		 axi_chan_name(chan), descs_put,
		 atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}
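/*
 * Residue reporting below relies on all hardware segments of a descriptor
 * being (nearly) the same length: the bytes still pending are the total
 * descriptor length minus completed_blocks * segment length, so the residue
 * is only accurate to one block.
 */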
static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	}
	/* else: cookie not on the issued list, no residue info, report 0 */

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}
static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}
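/*
 * The APB registers below only exist on chips that map a second register
 * region (see the "intel,kmb-axi-dma" handling in dw_probe()). For 8-bit or
 * 16-bit peripheral writes the matching per-channel bit in the byte/halfword
 * write-enable register must be set; on all other chips apb_regs stays NULL
 * and this helper is a no-op.
 */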
static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}
/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	u32 reg, irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
	       DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	axi_chan_iowrite32(chan, CH_CFG_L, reg);

	reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
	       priority << CH_CFG_H_PRIORITY_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		reg |= (chan->config.device_fc ?
			DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
			DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
			<< CH_CFG_H_TT_FC_POS;
		break;
	case DMA_DEV_TO_MEM:
		reg |= (chan->config.device_fc ?
			DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
			DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
			<< CH_CFG_H_TT_FC_POS;
		break;
	default:
		break;
	}
	axi_chan_iowrite32(chan, CH_CFG_H, reg);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	/* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}
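/*
 * Handshake routing (Intel KeemBay APB block): DMAC_APB_HW_HS_SEL_0/1 form a
 * 64-bit register file of 8-bit slots, one slot per channel. A slot holding
 * UNUSED_CHANNEL (0x3F) is free; claiming a channel writes the peripheral's
 * handshake number into a free slot, and releasing it writes 0x3F back. The
 * for_each_set_clump8() walk below scans those 8-bit slots.
 */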
static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
				      u32 handshake_num, bool set)
{
	unsigned long start = 0;
	unsigned long reg_value;
	unsigned long reg_mask;
	unsigned long reg_set;
	unsigned long mask;
	unsigned long val;

	if (!chip->apb_regs) {
		dev_dbg(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assigning a handshake number to the
	 * channel; unlock the DMA channel by assigning 0x3F to the channel.
	 */
	if (set) {
		reg_set = UNUSED_CHANNEL;
		val = handshake_num;
	} else {
		reg_set = handshake_num;
		val = UNUSED_CHANNEL;
	}

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	for_each_set_clump8(start, reg_mask, &reg_value, 64) {
		if (reg_mask == reg_set) {
			mask = GENMASK_ULL(start + 7, start);
			reg_value &= ~mask;
			reg_value |= rol64(val, start);
			lo_hi_writeq(reg_value,
				     chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
			break;
		}
	}
}
/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli->ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli->ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for destination master if available */
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}
static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;

	return 0;
}
static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}
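/*
 * Worked example for the segmentation below: with period_len = 6 KiB and a
 * maximum block length of 4 KiB, each period is split into
 * DIV_ROUND_UP(6K, 4K) = 2 segments of DIV_ROUND_UP(6K, 2) = 3 KiB each, so
 * a 2-period buffer needs 4 hardware LLIs.
 */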
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/*
		 * Set end-of-link on every linked descriptor so that the
		 * cyclic callback can be triggered from the interrupt.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
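/*
 * Note how the "managed transfer list" loops link the LLIs in reverse: each
 * segment's llp is pointed at its successor, with llp seeded differently per
 * caller. dw_axi_dma_chan_prep_cyclic() above seeds llp with the first LLI
 * so the chain wraps into a ring; dw_axi_dma_chan_prep_slave_sg() below
 * seeds it with 0 so the last LLI terminates the chain.
 */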
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs = 0;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);

	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);

		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
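/*
 * The memcpy path below re-evaluates the transfer width on every chunk: the
 * remaining length shrinks and both addresses advance as chunks are queued,
 * and the widest usable width depends on the alignment of all three values
 * (see axi_chan_get_xfer_width()).
 */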
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of the alignment. The source and destination
		 * widths could actually differ, but keep them the same for
		 * simplicity.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data items of
		 * xfer_width to be transferred in one DMA block transfer.
		 * The BLOCK_TS register must be programmed with block_ts - 1.
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* Update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_hw_desc *desc)
{
	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli->sar),
		le64_to_cpu(desc->lli->dar),
		le64_to_cpu(desc->lli->llp),
		le32_to_cpu(desc->lli->block_ts_lo),
		le32_to_cpu(desc->lli->ctl_hi),
		le32_to_cpu(desc->lli->ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
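/*
 * Completion handling: for a cyclic transfer the channel keeps running, so
 * the handler below reads CH_LLP to find the LLI the hardware is currently
 * fetching, re-validates the just-consumed LLI (the controller clears
 * CH_CTL_H_LLI_VALID as it fetches each one) and fires the cyclic callback
 * on period boundaries. Non-cyclic descriptors are simply completed and the
 * next queued descriptor is started.
 */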
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);

	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);

		/* Submit queued descriptors after processing the completed ones */
		axi_chan_start_first_queued(chan);
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;
	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}
static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 10000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan->chip,
					  chan->hw_handshake_num, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}
static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u32 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
	       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}

/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
	val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

	chan->is_paused = false;
}

static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}
static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}

static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}
static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct dw_axi_dma *dw = ofdma->of_dma_data;
	struct axi_dma_chan *chan;
	struct dma_chan *dchan;

	dchan = dma_get_any_slave_channel(&dw->dma);
	if (!dchan)
		return NULL;

	chan = dchan_to_axi_dma_chan(dchan);
	chan->hw_handshake_num = dma_spec->args[0];

	return dchan;
}
static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority value must be programmed within [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp - 1;
	}

	return 0;
}
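/*
 * Illustrative (not taken from any real board file) devicetree fragment for
 * the properties parsed above:
 *
 *	dma-channels = <4>;
 *	snps,dma-masters = <2>;
 *	snps,data-width = <3>;			// bus width = 2^3 * 8 = 64 bit
 *	snps,block-size = <1024 1024 1024 1024>;
 *	snps,priority = <0 1 2 3>;
 *	snps,axi-max-burst-len = <16>;		// optional
 */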
static int dw_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct axi_dma_chip *chip;
	struct resource *mem;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(chip->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.chancnt = hdata->nr_channels;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
	dw->dma.device_synchronize = dw_axi_dma_synchronize;
	dw->dma.device_config = dw_axi_dma_chan_slave_config;
	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;

	/*
	 * The Synopsys DesignWare AxiDMA datasheet states that the maximum
	 * supported block size is 1024 items. The device register width is
	 * 4 bytes, so constrain the maximum segment size to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because the driver also
	 * has to work without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	/* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}
static int dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable clocks before accessing the registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	devm_free_irq(chip->dev, chip->irq, chip);

	of_dma_controller_free(chip->dev->of_node);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	return 0;
}
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,axi-dma-1.01a" },
	{ .compatible = "intel,kmb-axi-dma" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name	= KBUILD_MODNAME,
		.of_match_table = dw_dma_of_id_table,
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");