1 // SPDX-License-Identifier: GPL-2.0
2 // (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
5 * Synopsys DesignWare AXI DMA Controller driver.
7 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
10 #include <linux/bitops.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/dmaengine.h>
14 #include <linux/dmapool.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/err.h>
17 #include <linux/interrupt.h>
19 #include <linux/iopoll.h>
20 #include <linux/io-64-nonatomic-lo-hi.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
24 #include <linux/of_dma.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/property.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
31 #include "dw-axi-dmac.h"
32 #include "../dmaengine.h"
33 #include "../virt-dma.h"
36 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
37 * master data bus width up to 512 bits (for both AXI master interfaces), but
38 * it depends on IP block configuration.
40 #define AXI_DMA_BUSWIDTHS \
41 (DMA_SLAVE_BUSWIDTH_1_BYTE | \
42 DMA_SLAVE_BUSWIDTH_2_BYTES | \
43 DMA_SLAVE_BUSWIDTH_4_BYTES | \
44 DMA_SLAVE_BUSWIDTH_8_BYTES | \
45 DMA_SLAVE_BUSWIDTH_16_BYTES | \
46 DMA_SLAVE_BUSWIDTH_32_BYTES | \
47 DMA_SLAVE_BUSWIDTH_64_BYTES)
50 axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
52 iowrite32(val, chip->regs + reg);
55 static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
57 return ioread32(chip->regs + reg);
61 axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
63 iowrite32(val, chan->chan_regs + reg);
66 static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
68 return ioread32(chan->chan_regs + reg);
72 axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
75 * We split one 64 bit write into two 32 bit writes as some HW doesn't
76 * support 64 bit access.
78 iowrite32(lower_32_bits(val), chan->chan_regs + reg);
79 iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
82 static inline void axi_dma_disable(struct axi_dma_chip *chip)
86 val = axi_dma_ioread32(chip, DMAC_CFG);
88 axi_dma_iowrite32(chip, DMAC_CFG, val);
91 static inline void axi_dma_enable(struct axi_dma_chip *chip)
95 val = axi_dma_ioread32(chip, DMAC_CFG);
97 axi_dma_iowrite32(chip, DMAC_CFG, val);
100 static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
104 val = axi_dma_ioread32(chip, DMAC_CFG);
106 axi_dma_iowrite32(chip, DMAC_CFG, val);
109 static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
113 val = axi_dma_ioread32(chip, DMAC_CFG);
115 axi_dma_iowrite32(chip, DMAC_CFG, val);
118 static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
122 if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
123 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
125 val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
127 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
131 static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
133 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
136 static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
138 axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
141 static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
143 axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
146 static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
148 return axi_chan_ioread32(chan, CH_INTSTATUS);
151 static inline void axi_chan_disable(struct axi_dma_chan *chan)
155 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
156 val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
157 val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
158 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
161 static inline void axi_chan_enable(struct axi_dma_chan *chan)
165 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
166 val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
167 BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
168 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
171 static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
175 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
177 return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
180 static void axi_dma_hw_init(struct axi_dma_chip *chip)
184 for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
185 axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
186 axi_chan_disable(&chip->dw->chan[i]);
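/*
 * Pick the widest transfer width (as a power-of-two exponent) that the
 * source address, destination address and length are all aligned to,
 * capped at the master data bus width.
 */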
190 static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
191 dma_addr_t dst, size_t len)
193 u32 max_width = chan->chip->dw->hdata->m_data_width;
195 return __ffs(src | dst | len | BIT(max_width));
198 static inline const char *axi_chan_name(struct axi_dma_chan *chan)
200 return dma_chan_name(&chan->vc.chan);
203 static struct axi_dma_desc *axi_desc_alloc(u32 num)
205 struct axi_dma_desc *desc;
207 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
211 desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
212 if (!desc->hw_desc) {
220 static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
223 struct axi_dma_lli *lli;
226 lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
227 if (unlikely(!lli)) {
228 dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
229 axi_chan_name(chan));
233 atomic_inc(&chan->descs_allocated);
239 static void axi_desc_put(struct axi_dma_desc *desc)
241 struct axi_dma_chan *chan = desc->chan;
242 int count = atomic_read(&chan->descs_allocated);
243 struct axi_dma_hw_desc *hw_desc;
246 for (descs_put = 0; descs_put < count; descs_put++) {
247 hw_desc = &desc->hw_desc[descs_put];
248 dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
251 kfree(desc->hw_desc);
253 atomic_sub(descs_put, &chan->descs_allocated);
254 dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
255 axi_chan_name(chan), descs_put,
256 atomic_read(&chan->descs_allocated));
259 static void vchan_desc_put(struct virt_dma_desc *vdesc)
261 axi_desc_put(vd_to_axi_desc(vdesc));
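/*
 * Report the transfer status for a cookie. If the descriptor is still
 * queued, estimate the residue as the descriptor length minus the number
 * of completed blocks times the per-block length.
 */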
264 static enum dma_status
265 dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
266 struct dma_tx_state *txstate)
268 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
269 struct virt_dma_desc *vdesc;
270 enum dma_status status;
271 u32 completed_length;
273 u32 completed_blocks;
278 status = dma_cookie_status(dchan, cookie, txstate);
279 if (status == DMA_COMPLETE || !txstate)
282 spin_lock_irqsave(&chan->vc.lock, flags);
284 vdesc = vchan_find_desc(&chan->vc, cookie);
286 length = vd_to_axi_desc(vdesc)->length;
287 completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
288 len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
289 completed_length = completed_blocks * len;
290 bytes = length - completed_length;
292 bytes = vd_to_axi_desc(vdesc)->length;
295 spin_unlock_irqrestore(&chan->vc.lock, flags);
296 dma_set_residue(txstate, bytes);
301 static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
303 desc->lli->llp = cpu_to_le64(adr);
306 static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
308 axi_chan_iowrite64(chan, CH_LLP, adr);
311 static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
313 u32 offset = DMAC_APB_BYTE_WR_CH_EN;
316 if (!chan->chip->apb_regs) {
317 dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
321 reg_width = __ffs(chan->config.dst_addr_width);
322 if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
323 offset = DMAC_APB_HALFWORD_WR_CH_EN;
325 val = ioread32(chan->chip->apb_regs + offset);
328 val |= BIT(chan->id);
330 val &= ~BIT(chan->id);
332 iowrite32(val, chan->chip->apb_regs + offset);
334 /* Called in chan locked context */
335 static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
336 struct axi_dma_desc *first)
338 u32 priority = chan->chip->dw->hdata->priority[chan->id];
340 u8 lms = 0; /* Select AXI0 master for LLI fetching */
342 if (unlikely(axi_chan_is_hw_enable(chan))) {
343 dev_err(chan2dev(chan), "%s is non-idle!\n",
344 axi_chan_name(chan));
349 axi_dma_enable(chan->chip);
351 reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
352 DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
353 axi_chan_iowrite32(chan, CH_CFG_L, reg);
355 reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
356 priority << CH_CFG_H_PRIORITY_POS |
357 DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
358 DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
359 switch (chan->direction) {
361 dw_axi_dma_set_byte_halfword(chan, true);
362 reg |= (chan->config.device_fc ?
363 DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
364 DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
365 << CH_CFG_H_TT_FC_POS;
366 if (chan->chip->apb_regs)
367 reg |= (chan->id << CH_CFG_H_DST_PER_POS);
370 reg |= (chan->config.device_fc ?
371 DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
372 DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
373 << CH_CFG_H_TT_FC_POS;
374 if (chan->chip->apb_regs)
375 reg |= (chan->id << CH_CFG_H_SRC_PER_POS);
380 axi_chan_iowrite32(chan, CH_CFG_H, reg);
382 write_chan_llp(chan, first->hw_desc[0].llp | lms);
384 irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
385 axi_chan_irq_sig_set(chan, irq_mask);
387 /* Generate 'suspend' status but don't generate interrupt */
388 irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
389 axi_chan_irq_set(chan, irq_mask);
391 axi_chan_enable(chan);
394 static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
396 struct axi_dma_desc *desc;
397 struct virt_dma_desc *vd;
399 vd = vchan_next_desc(&chan->vc);
403 desc = vd_to_axi_desc(vd);
404 dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
406 axi_chan_block_xfer_start(chan, desc);
409 static void dma_chan_issue_pending(struct dma_chan *dchan)
411 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
414 spin_lock_irqsave(&chan->vc.lock, flags);
415 if (vchan_issue_pending(&chan->vc))
416 axi_chan_start_first_queued(chan);
417 spin_unlock_irqrestore(&chan->vc.lock, flags);
420 static void dw_axi_dma_synchronize(struct dma_chan *dchan)
422 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
424 vchan_synchronize(&chan->vc);
427 static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
429 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
431 /* ASSERT: channel is idle */
432 if (axi_chan_is_hw_enable(chan)) {
433 dev_err(chan2dev(chan), "%s is non-idle!\n",
434 axi_chan_name(chan));
438 /* LLI address must be aligned to a 64-byte boundary */
439 chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
441 sizeof(struct axi_dma_lli),
443 if (!chan->desc_pool) {
444 dev_err(chan2dev(chan), "No memory for descriptors\n");
447 dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
449 pm_runtime_get(chan->chip->dev);
454 static void dma_chan_free_chan_resources(struct dma_chan *dchan)
456 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
458 /* ASSERT: channel is idle */
459 if (axi_chan_is_hw_enable(chan))
460 dev_err(dchan2dev(dchan), "%s is non-idle!\n",
461 axi_chan_name(chan));
463 axi_chan_disable(chan);
464 axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
466 vchan_free_chan_resources(&chan->vc);
468 dma_pool_destroy(chan->desc_pool);
469 chan->desc_pool = NULL;
470 dev_vdbg(dchan2dev(dchan),
471 "%s: free resources, descriptor still allocated: %u\n",
472 axi_chan_name(chan), atomic_read(&chan->descs_allocated));
474 pm_runtime_put(chan->chip->dev);
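/*
 * Route the channel to its hardware handshake interface through the APB
 * handshake-select register, or park it on the "unused" value when the
 * channel is released. Only possible when the optional APB register
 * block is mapped (e.g. on Intel KeemBay).
 */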
477 static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
479 struct axi_dma_chip *chip = chan->chip;
480 unsigned long reg_value, val;
482 if (!chip->apb_regs) {
483 dev_err(chip->dev, "apb_regs not initialized\n");
488 * An unused DMA channel has a default value of 0x3F.
489 * Lock the DMA channel by assigning a handshake number to the channel.
490 * Unlock the DMA channel by assigning 0x3F to the channel.
493 val = chan->hw_handshake_num;
495 val = UNUSED_CHANNEL;
497 reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
499 /* Channel is already allocated, set handshake as per channel ID */
500 /* A single 64 bit write covers the handshake selects of all 8 channels */
502 reg_value &= ~(DMA_APB_HS_SEL_MASK <<
503 (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
504 reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
505 lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
511 * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
512 * as 1, it understands that the current block is the final block in the
513 * transfer and completes the DMA transfer operation at the end of the current block transfer.
516 static void set_desc_last(struct axi_dma_hw_desc *desc)
520 val = le32_to_cpu(desc->lli->ctl_hi);
521 val |= CH_CTL_H_LLI_LAST;
522 desc->lli->ctl_hi = cpu_to_le32(val);
525 static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
527 desc->lli->sar = cpu_to_le64(adr);
530 static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
532 desc->lli->dar = cpu_to_le64(adr);
535 static void set_desc_src_master(struct axi_dma_hw_desc *desc)
539 /* Select AXI0 for source master */
540 val = le32_to_cpu(desc->lli->ctl_lo);
541 val &= ~CH_CTL_L_SRC_MAST;
542 desc->lli->ctl_lo = cpu_to_le32(val);
545 static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
546 struct axi_dma_desc *desc)
550 /* Select AXI1 for destination master if available */
551 val = le32_to_cpu(hw_desc->lli->ctl_lo);
552 if (desc->chan->chip->dw->hdata->nr_masters > 1)
553 val |= CH_CTL_L_DST_MAST;
555 val &= ~CH_CTL_L_DST_MAST;
557 hw_desc->lli->ctl_lo = cpu_to_le32(val);
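/*
 * Fill one hardware LLI for a slave segment: derive the memory and
 * register transfer widths, program SAR/DAR and the increment modes for
 * the given direction, set the block transfer size and, if required,
 * limit the AXI burst length.
 */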
560 static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
561 struct axi_dma_hw_desc *hw_desc,
562 dma_addr_t mem_addr, size_t len)
564 unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
565 unsigned int reg_width;
566 unsigned int mem_width;
567 dma_addr_t device_addr;
573 axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
575 mem_width = __ffs(data_width | mem_addr | len);
576 if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
577 mem_width = DWAXIDMAC_TRANS_WIDTH_32;
579 if (!IS_ALIGNED(mem_addr, 4)) {
580 dev_err(chan->chip->dev, "invalid buffer alignment\n");
584 switch (chan->direction) {
586 reg_width = __ffs(chan->config.dst_addr_width);
587 device_addr = chan->config.dst_addr;
588 ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
589 mem_width << CH_CTL_L_SRC_WIDTH_POS |
590 DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
591 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
592 block_ts = len >> mem_width;
595 reg_width = __ffs(chan->config.src_addr_width);
596 device_addr = chan->config.src_addr;
597 ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
598 mem_width << CH_CTL_L_DST_WIDTH_POS |
599 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
600 DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
601 block_ts = len >> reg_width;
607 if (block_ts > axi_block_ts)
610 hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
611 if (unlikely(!hw_desc->lli))
614 ctlhi = CH_CTL_H_LLI_VALID;
616 if (chan->chip->dw->hdata->restrict_axi_burst_len) {
617 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
618 ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
619 burst_len << CH_CTL_H_ARLEN_POS |
620 burst_len << CH_CTL_H_AWLEN_POS;
623 hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);
625 if (chan->direction == DMA_MEM_TO_DEV) {
626 write_desc_sar(hw_desc, mem_addr);
627 write_desc_dar(hw_desc, device_addr);
629 write_desc_sar(hw_desc, device_addr);
630 write_desc_dar(hw_desc, mem_addr);
633 hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
635 ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
636 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
637 hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);
639 set_desc_src_master(hw_desc);
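/*
 * Convert the per-channel maximum block size (expressed in data items)
 * into bytes: use the memory-side transfer width for MEM_TO_DEV and the
 * device register width for DEV_TO_MEM.
 */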
645 static size_t calculate_block_len(struct axi_dma_chan *chan,
646 dma_addr_t dma_addr, size_t buf_len,
647 enum dma_transfer_direction direction)
649 u32 data_width, reg_width, mem_width;
650 size_t axi_block_ts, block_len;
652 axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
656 data_width = BIT(chan->chip->dw->hdata->m_data_width);
657 mem_width = __ffs(data_width | dma_addr | buf_len);
658 if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
659 mem_width = DWAXIDMAC_TRANS_WIDTH_32;
661 block_len = axi_block_ts << mem_width;
664 reg_width = __ffs(chan->config.src_addr_width);
665 block_len = axi_block_ts << reg_width;
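/*
 * Prepare a cyclic transfer: split the buffer into periods and each
 * period into segments no larger than the hardware block limit, program
 * one LLI per segment, mark every LLI as "last" so each block completion
 * raises an interrupt for the period callback, then link the LLIs into a
 * ring.
 */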
674 static struct dma_async_tx_descriptor *
675 dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
676 size_t buf_len, size_t period_len,
677 enum dma_transfer_direction direction,
680 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
681 struct axi_dma_hw_desc *hw_desc = NULL;
682 struct axi_dma_desc *desc = NULL;
683 dma_addr_t src_addr = dma_addr;
684 u32 num_periods, num_segments;
685 size_t axi_block_len;
691 u8 lms = 0; /* Select AXI0 master for LLI fetching */
693 num_periods = buf_len / period_len;
695 axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
696 if (axi_block_len == 0)
699 num_segments = DIV_ROUND_UP(period_len, axi_block_len);
700 segment_len = DIV_ROUND_UP(period_len, num_segments);
702 total_segments = num_periods * num_segments;
704 desc = axi_desc_alloc(total_segments);
708 chan->direction = direction;
712 desc->period_len = period_len;
714 for (i = 0; i < total_segments; i++) {
715 hw_desc = &desc->hw_desc[i];
717 status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
722 desc->length += hw_desc->len;
723 /* Set end-of-link on each linked descriptor, so that the cyclic
724 * callback function can be triggered during the interrupt.
726 set_desc_last(hw_desc);
728 src_addr += segment_len;
731 llp = desc->hw_desc[0].llp;
733 /* Link the descriptors into a circular transfer list */
735 hw_desc = &desc->hw_desc[--total_segments];
736 write_desc_llp(hw_desc, llp | lms);
738 } while (total_segments);
740 dw_axi_dma_set_hw_channel(chan, true);
742 return vchan_tx_prep(&chan->vc, &desc->vd, flags);
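/*
 * Prepare a slave scatter-gather transfer: split each scatterlist entry
 * into segments no larger than the hardware block limit, program one LLI
 * per segment, mark the final LLI as "last" and chain the LLIs into a
 * linked list.
 */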
751 static struct dma_async_tx_descriptor *
752 dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
754 enum dma_transfer_direction direction,
755 unsigned long flags, void *context)
757 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
758 struct axi_dma_hw_desc *hw_desc = NULL;
759 struct axi_dma_desc *desc = NULL;
760 u32 num_segments, segment_len;
761 unsigned int loop = 0;
762 struct scatterlist *sg;
763 size_t axi_block_len;
764 u32 len, num_sgs = 0;
769 u8 lms = 0; /* Select AXI0 master for LLI fetching */
771 if (unlikely(!is_slave_direction(direction) || !sg_len))
774 mem = sg_dma_address(sgl);
775 len = sg_dma_len(sgl);
777 axi_block_len = calculate_block_len(chan, mem, len, direction);
778 if (axi_block_len == 0)
781 for_each_sg(sgl, sg, sg_len, i)
782 num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
784 desc = axi_desc_alloc(num_sgs);
790 chan->direction = direction;
792 for_each_sg(sgl, sg, sg_len, i) {
793 mem = sg_dma_address(sg);
794 len = sg_dma_len(sg);
795 num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
796 segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);
799 hw_desc = &desc->hw_desc[loop++];
800 status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
804 desc->length += hw_desc->len;
807 } while (len >= segment_len);
810 /* Set end-of-link on the last link descriptor of the list */
811 set_desc_last(&desc->hw_desc[num_sgs - 1]);
813 /* Chain the descriptors into a linked transfer list */
815 hw_desc = &desc->hw_desc[--num_sgs];
816 write_desc_llp(hw_desc, llp | lms);
820 dw_axi_dma_set_hw_channel(chan, true);
822 return vchan_tx_prep(&chan->vc, &desc->vd, flags);
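/*
 * Prepare a memory-to-memory copy: split the copy into blocks of at most
 * the hardware block size, pick the widest transfer width the addresses
 * and remaining length allow for each block, and chain the resulting
 * LLIs.
 */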
831 static struct dma_async_tx_descriptor *
832 dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
833 dma_addr_t src_adr, size_t len, unsigned long flags)
835 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
836 size_t block_ts, max_block_ts, xfer_len;
837 struct axi_dma_hw_desc *hw_desc = NULL;
838 struct axi_dma_desc *desc = NULL;
839 u32 xfer_width, reg, num;
841 u8 lms = 0; /* Select AXI0 master for LLI fetching */
843 dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
844 axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
846 max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
847 xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
848 num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
849 desc = axi_desc_alloc(num);
859 hw_desc = &desc->hw_desc[num];
861 * Take care of the alignment.
862 * Source and destination widths can actually differ, but
863 * keep them the same for simplicity.
865 xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);
868 * block_ts indicates the total number of data items of xfer_width
869 * to be transferred in a DMA block transfer.
870 * The BLOCK_TS register should be set to block_ts - 1.
872 block_ts = xfer_len >> xfer_width;
873 if (block_ts > max_block_ts) {
874 block_ts = max_block_ts;
875 xfer_len = max_block_ts << xfer_width;
878 hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
879 if (unlikely(!hw_desc->lli))
882 write_desc_sar(hw_desc, src_adr);
883 write_desc_dar(hw_desc, dst_adr);
884 hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
886 reg = CH_CTL_H_LLI_VALID;
887 if (chan->chip->dw->hdata->restrict_axi_burst_len) {
888 u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
890 reg |= (CH_CTL_H_ARLEN_EN |
891 burst_len << CH_CTL_H_ARLEN_POS |
893 burst_len << CH_CTL_H_AWLEN_POS);
895 hw_desc->lli->ctl_hi = cpu_to_le32(reg);
897 reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
898 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
899 xfer_width << CH_CTL_L_DST_WIDTH_POS |
900 xfer_width << CH_CTL_L_SRC_WIDTH_POS |
901 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
902 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
903 hw_desc->lli->ctl_lo = cpu_to_le32(reg);
905 set_desc_src_master(hw_desc);
906 set_desc_dest_master(hw_desc, desc);
908 hw_desc->len = xfer_len;
909 desc->length += hw_desc->len;
910 /* update the length and addresses for the next loop cycle */
917 /* Set end-of-link on the last link descriptor of the list */
918 set_desc_last(&desc->hw_desc[num - 1]);
919 /* Chain the descriptors into a linked transfer list */
921 hw_desc = &desc->hw_desc[--num];
922 write_desc_llp(hw_desc, llp | lms);
926 return vchan_tx_prep(&chan->vc, &desc->vd, flags);
934 static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
935 struct dma_slave_config *config)
937 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
939 memcpy(&chan->config, config, sizeof(*config));
944 static void axi_chan_dump_lli(struct axi_dma_chan *chan,
945 struct axi_dma_hw_desc *desc)
947 dev_err(dchan2dev(&chan->vc.chan),
948 "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
949 le64_to_cpu(desc->lli->sar),
950 le64_to_cpu(desc->lli->dar),
951 le64_to_cpu(desc->lli->llp),
952 le32_to_cpu(desc->lli->block_ts_lo),
953 le32_to_cpu(desc->lli->ctl_hi),
954 le32_to_cpu(desc->lli->ctl_lo));
957 static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
958 struct axi_dma_desc *desc_head)
960 int count = atomic_read(&chan->descs_allocated);
963 for (i = 0; i < count; i++)
964 axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
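/*
 * Handle an error interrupt: disable the channel, complete and drop the
 * offending descriptor (dumping its LLIs for debugging) and try to
 * restart the channel with the next queued descriptor.
 */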
967 static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
969 struct virt_dma_desc *vd;
972 spin_lock_irqsave(&chan->vc.lock, flags);
974 axi_chan_disable(chan);
976 /* The bad descriptor is currently at the head of the vc list */
977 vd = vchan_next_desc(&chan->vc);
978 /* Remove the bad descriptor from the issued list */
981 /* WARN about bad descriptor */
982 dev_err(chan2dev(chan),
983 "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
984 axi_chan_name(chan), vd->tx.cookie, status);
985 axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
987 vchan_cookie_complete(vd);
989 /* Try to restart the controller */
990 axi_chan_start_first_queued(chan);
992 spin_unlock_irqrestore(&chan->vc.lock, flags);
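/*
 * Handle a transfer-complete interrupt. For cyclic transfers, find the
 * LLI the hardware is currently fetching (CH_LLP), re-validate it and
 * fire the period callback; otherwise complete the descriptor and start
 * the next queued one.
 */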
995 static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
997 int count = atomic_read(&chan->descs_allocated);
998 struct axi_dma_hw_desc *hw_desc;
999 struct axi_dma_desc *desc;
1000 struct virt_dma_desc *vd;
1001 unsigned long flags;
1005 spin_lock_irqsave(&chan->vc.lock, flags);
1006 if (unlikely(axi_chan_is_hw_enable(chan))) {
1007 dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
1008 axi_chan_name(chan));
1009 axi_chan_disable(chan);
1012 /* The completed descriptor is currently at the head of the vc list */
1013 vd = vchan_next_desc(&chan->vc);
1016 desc = vd_to_axi_desc(vd);
1018 llp = lo_hi_readq(chan->chan_regs + CH_LLP);
1019 for (i = 0; i < count; i++) {
1020 hw_desc = &desc->hw_desc[i];
1021 if (hw_desc->llp == llp) {
1022 axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
1023 hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
1024 desc->completed_blocks = i;
1026 if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
1027 vchan_cyclic_callback(vd);
1032 axi_chan_enable(chan);
1035 /* Remove the completed descriptor from issued list before completing */
1036 list_del(&vd->node);
1037 vchan_cookie_complete(vd);
1039 /* Submit queued descriptors after processing the completed ones */
1040 axi_chan_start_first_queued(chan);
1043 spin_unlock_irqrestore(&chan->vc.lock, flags);
1046 static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
1048 struct axi_dma_chip *chip = dev_id;
1049 struct dw_axi_dma *dw = chip->dw;
1050 struct axi_dma_chan *chan;
1054 /* Disable DMAC interrupts. We'll enable them after processing channels */
1055 axi_dma_irq_disable(chip);
1057 /* Poll, clear and process every channel interrupt status */
1058 for (i = 0; i < dw->hdata->nr_channels; i++) {
1059 chan = &dw->chan[i];
1060 status = axi_chan_irq_read(chan);
1061 axi_chan_irq_clear(chan, status);
1063 dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
1064 axi_chan_name(chan), i, status);
1066 if (status & DWAXIDMAC_IRQ_ALL_ERR)
1067 axi_chan_handle_err(chan, status);
1068 else if (status & DWAXIDMAC_IRQ_DMA_TRF)
1069 axi_chan_block_xfer_complete(chan);
1072 /* Re-enable interrupts */
1073 axi_dma_irq_enable(chip);
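/*
 * Terminate all transfers: disable the channel and poll DMAC_CHEN until
 * the hardware reports it stopped, release the handshake interface and
 * byte/halfword enables for slave channels, then free every queued
 * descriptor.
 */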
1078 static int dma_chan_terminate_all(struct dma_chan *dchan)
1080 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1081 u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
1082 unsigned long flags;
1087 axi_chan_disable(chan);
1089 ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
1090 !(val & chan_active), 1000, 10000);
1091 if (ret == -ETIMEDOUT)
1092 dev_warn(dchan2dev(dchan),
1093 "%s failed to stop\n", axi_chan_name(chan));
1095 if (chan->direction != DMA_MEM_TO_MEM)
1096 dw_axi_dma_set_hw_channel(chan, false);
1097 if (chan->direction == DMA_MEM_TO_DEV)
1098 dw_axi_dma_set_byte_halfword(chan, false);
1100 spin_lock_irqsave(&chan->vc.lock, flags);
1102 vchan_get_all_descriptors(&chan->vc, &head);
1104 chan->cyclic = false;
1105 spin_unlock_irqrestore(&chan->vc.lock, flags);
1107 vchan_dma_desc_free_list(&chan->vc, &head);
1109 dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
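/*
 * Pause the channel: request suspend through DMAC_CHEN and poll the
 * channel interrupt status for the SUSPENDED flag for a bounded number
 * of iterations.
 */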
1114 static int dma_chan_pause(struct dma_chan *dchan)
1116 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1117 unsigned long flags;
1118 unsigned int timeout = 20; /* timeout iterations */
1121 spin_lock_irqsave(&chan->vc.lock, flags);
1123 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
1124 val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
1125 BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
1126 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
1129 if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
1133 } while (--timeout);
1135 axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
1137 chan->is_paused = true;
1139 spin_unlock_irqrestore(&chan->vc.lock, flags);
1141 return timeout ? 0 : -EAGAIN;
1144 /* Called in chan locked context */
1145 static inline void axi_chan_resume(struct axi_dma_chan *chan)
1149 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
1150 val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
1151 val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
1152 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
1154 chan->is_paused = false;
1157 static int dma_chan_resume(struct dma_chan *dchan)
1159 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1160 unsigned long flags;
1162 spin_lock_irqsave(&chan->vc.lock, flags);
1164 if (chan->is_paused)
1165 axi_chan_resume(chan);
1167 spin_unlock_irqrestore(&chan->vc.lock, flags);
1172 static int axi_dma_suspend(struct axi_dma_chip *chip)
1174 axi_dma_irq_disable(chip);
1175 axi_dma_disable(chip);
1177 clk_disable_unprepare(chip->core_clk);
1178 clk_disable_unprepare(chip->cfgr_clk);
1183 static int axi_dma_resume(struct axi_dma_chip *chip)
1187 ret = clk_prepare_enable(chip->cfgr_clk);
1191 ret = clk_prepare_enable(chip->core_clk);
1195 axi_dma_enable(chip);
1196 axi_dma_irq_enable(chip);
1201 static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
1203 struct axi_dma_chip *chip = dev_get_drvdata(dev);
1205 return axi_dma_suspend(chip);
1208 static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
1210 struct axi_dma_chip *chip = dev_get_drvdata(dev);
1212 return axi_dma_resume(chip);
1215 static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
1216 struct of_dma *ofdma)
1218 struct dw_axi_dma *dw = ofdma->of_dma_data;
1219 struct axi_dma_chan *chan;
1220 struct dma_chan *dchan;
1222 dchan = dma_get_any_slave_channel(&dw->dma);
1226 chan = dchan_to_axi_dma_chan(dchan);
1227 chan->hw_handshake_num = dma_spec->args[0];
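/*
 * Read the controller configuration (channel count, number of masters,
 * data width, per-channel block size and priority, optional AXI burst
 * length limit) from firmware properties and validate it against the
 * driver limits.
 */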
1231 static int parse_device_properties(struct axi_dma_chip *chip)
1233 struct device *dev = chip->dev;
1234 u32 tmp, carr[DMAC_MAX_CHANNELS];
1237 ret = device_property_read_u32(dev, "dma-channels", &tmp);
1240 if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
1243 chip->dw->hdata->nr_channels = tmp;
1245 ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
1248 if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
1251 chip->dw->hdata->nr_masters = tmp;
1253 ret = device_property_read_u32(dev, "snps,data-width", &tmp);
1256 if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
1259 chip->dw->hdata->m_data_width = tmp;
1261 ret = device_property_read_u32_array(dev, "snps,block-size", carr,
1262 chip->dw->hdata->nr_channels);
1265 for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
1266 if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
1269 chip->dw->hdata->block_size[tmp] = carr[tmp];
1272 ret = device_property_read_u32_array(dev, "snps,priority", carr,
1273 chip->dw->hdata->nr_channels);
1276 /* Priority value must be programmed within [0:nr_channels-1] range */
1277 for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
1278 if (carr[tmp] >= chip->dw->hdata->nr_channels)
1281 chip->dw->hdata->priority[tmp] = carr[tmp];
1284 /* axi-max-burst-len is an optional property */
1285 ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
1287 if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
1289 if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
1292 chip->dw->hdata->restrict_axi_burst_len = true;
1293 chip->dw->hdata->axi_rw_burst_len = tmp;
1299 static int dw_probe(struct platform_device *pdev)
1301 struct device_node *node = pdev->dev.of_node;
1302 struct axi_dma_chip *chip;
1303 struct resource *mem;
1304 struct dw_axi_dma *dw;
1305 struct dw_axi_dma_hcfg *hdata;
1309 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
1313 dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
1317 hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
1322 chip->dev = &pdev->dev;
1323 chip->dw->hdata = hdata;
1325 chip->irq = platform_get_irq(pdev, 0);
1329 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1330 chip->regs = devm_ioremap_resource(chip->dev, mem);
1331 if (IS_ERR(chip->regs))
1332 return PTR_ERR(chip->regs);
1334 if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
1335 chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
1336 if (IS_ERR(chip->apb_regs))
1337 return PTR_ERR(chip->apb_regs);
1340 chip->core_clk = devm_clk_get(chip->dev, "core-clk");
1341 if (IS_ERR(chip->core_clk))
1342 return PTR_ERR(chip->core_clk);
1344 chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
1345 if (IS_ERR(chip->cfgr_clk))
1346 return PTR_ERR(chip->cfgr_clk);
1348 ret = parse_device_properties(chip);
1352 dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
1353 sizeof(*dw->chan), GFP_KERNEL);
1357 ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
1358 IRQF_SHARED, KBUILD_MODNAME, chip);
1362 INIT_LIST_HEAD(&dw->dma.channels);
1363 for (i = 0; i < hdata->nr_channels; i++) {
1364 struct axi_dma_chan *chan = &dw->chan[i];
1368 chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
1369 atomic_set(&chan->descs_allocated, 0);
1371 chan->vc.desc_free = vchan_desc_put;
1372 vchan_init(&chan->vc, &dw->dma);
1375 /* Set capabilities */
1376 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1377 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1378 dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
1380 /* DMA capabilities */
1381 dw->dma.chancnt = hdata->nr_channels;
1382 dw->dma.max_burst = hdata->axi_rw_burst_len;
1383 dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
1384 dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
1385 dw->dma.directions = BIT(DMA_MEM_TO_MEM);
1386 dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1387 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1389 dw->dma.dev = chip->dev;
1390 dw->dma.device_tx_status = dma_chan_tx_status;
1391 dw->dma.device_issue_pending = dma_chan_issue_pending;
1392 dw->dma.device_terminate_all = dma_chan_terminate_all;
1393 dw->dma.device_pause = dma_chan_pause;
1394 dw->dma.device_resume = dma_chan_resume;
1396 dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
1397 dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
1399 dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
1400 dw->dma.device_synchronize = dw_axi_dma_synchronize;
1401 dw->dma.device_config = dw_axi_dma_chan_slave_config;
1402 dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
1403 dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;
1406 * The Synopsys DesignWare AXI DMA datasheet states that the maximum
1407 * supported block size is 1024. The device register width is 4 bytes.
1408 * Therefore, set the constraint to 1024 * 4.
1410 dw->dma.dev->dma_parms = &dw->dma_parms;
1411 dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
1412 platform_set_drvdata(pdev, chip);
1414 pm_runtime_enable(chip->dev);
1417 * We can't just call pm_runtime_get here instead of
1418 * pm_runtime_get_noresume + axi_dma_resume because we need the
1419 * driver to also work without Runtime PM.
1421 pm_runtime_get_noresume(chip->dev);
1422 ret = axi_dma_resume(chip);
1424 goto err_pm_disable;
1426 axi_dma_hw_init(chip);
1428 pm_runtime_put(chip->dev);
1430 ret = dmaenginem_async_device_register(&dw->dma);
1432 goto err_pm_disable;
1434 /* Register with OF helpers for DMA lookups */
1435 ret = of_dma_controller_register(pdev->dev.of_node,
1436 dw_axi_dma_of_xlate, dw);
1438 dev_warn(&pdev->dev,
1439 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");
1441 dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
1442 dw->hdata->nr_channels);
1447 pm_runtime_disable(chip->dev);
1452 static int dw_remove(struct platform_device *pdev)
1454 struct axi_dma_chip *chip = platform_get_drvdata(pdev);
1455 struct dw_axi_dma *dw = chip->dw;
1456 struct axi_dma_chan *chan, *_chan;
1459 /* Enable clocks before accessing the registers */
1460 clk_prepare_enable(chip->cfgr_clk);
1461 clk_prepare_enable(chip->core_clk);
1462 axi_dma_irq_disable(chip);
1463 for (i = 0; i < dw->hdata->nr_channels; i++) {
1464 axi_chan_disable(&chip->dw->chan[i]);
1465 axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
1467 axi_dma_disable(chip);
1469 pm_runtime_disable(chip->dev);
1470 axi_dma_suspend(chip);
1472 devm_free_irq(chip->dev, chip->irq, chip);
1474 of_dma_controller_free(chip->dev->of_node);
1476 list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
1477 vc.chan.device_node) {
1478 list_del(&chan->vc.chan.device_node);
1479 tasklet_kill(&chan->vc.task);
1485 static const struct dev_pm_ops dw_axi_dma_pm_ops = {
1486 SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
1489 static const struct of_device_id dw_dma_of_id_table[] = {
1490 { .compatible = "snps,axi-dma-1.01a" },
1491 { .compatible = "intel,kmb-axi-dma" },
1494 MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
1496 static struct platform_driver dw_driver = {
1498 .remove = dw_remove,
1500 .name = KBUILD_MODNAME,
1501 .of_match_table = dw_dma_of_id_table,
1502 .pm = &dw_axi_dma_pm_ops,
1505 module_platform_driver(dw_driver);
1507 MODULE_LICENSE("GPL v2");
1508 MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
1509 MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");