// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}
static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}
static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}
static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}
static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * We split one 64-bit write into two 32-bit writes because some
	 * hardware doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}
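
/*
 * Write the channel configuration registers. Controllers with the legacy
 * 8-channel register map (reg_map_8_channels) keep the handshake interface
 * and priority fields in CH_CFG_H, while newer register maps move the
 * handshake interface numbers into CH_CFG_L and use the CH_CFG2_* field
 * positions instead.
 */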
static inline void axi_chan_config_write(struct axi_dma_chan *chan,
					 struct axi_dma_chan_config *config)
{
	u32 cfg_lo, cfg_hi;

	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	if (chan->chip->dw->hdata->reg_map_8_channels) {
		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
			 config->src_per << CH_CFG_H_SRC_PER_POS |
			 config->dst_per << CH_CFG_H_DST_PER_POS |
			 config->prior << CH_CFG_H_PRIORITY_POS;
	} else {
		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
			  config->dst_per << CH_CFG2_L_DST_PER_POS;
		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
			 config->prior << CH_CFG2_H_PRIORITY_POS;
	}
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
}
static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}
static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}
static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}
static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}
static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}
static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
	if (chan->chip->dw->hdata->reg_map_8_channels)
		val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	else
		val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}
static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	if (chan->chip->dw->hdata->reg_map_8_channels)
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	else
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}
static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}
static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	int ret;
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
}
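
/*
 * The OR of source address, destination address, transfer length and the
 * maximum master data width has its lowest set bit at the largest
 * power-of-two stride all four values share, so __ffs() of it yields the
 * widest transfer width (encoded as log2 of the byte count) the hardware can
 * legally use for this transfer.
 */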
static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}
static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}
static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}

	return desc;
}
static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}
static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		 axi_chan_name(chan), descs_put,
		 atomic_read(&chan->descs_allocated));
}
static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}
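
/*
 * Residue reporting: for an issued-but-unfinished descriptor the residue is
 * its total length minus completed_blocks * hw_desc[0].len. completed_blocks
 * is only advanced by the cyclic completion path, and the multiplication
 * assumes equal-sized segments (which the cyclic preparation path arranges),
 * so for non-cyclic descriptors the residue is simply the full length.
 */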
static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	} else {
		bytes = vd_to_axi_desc(vdesc)->length;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}
static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}
static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}
/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	struct axi_dma_chan_config config = {};
	u32 irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
	config.prior = priority;
	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
		if (chan->chip->apb_regs)
			config.dst_per = chan->id;
		else
			config.dst_per = chan->hw_handshake_num;
		break;
	case DMA_DEV_TO_MEM:
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
		if (chan->chip->apb_regs)
			config.src_per = chan->id;
		else
			config.src_per = chan->hw_handshake_num;
		break;
	default:
		break;
	}
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}
static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		 vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}
static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}
static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	/* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}
static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptors still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}
static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
	struct axi_dma_chip *chip = chan->chip;
	unsigned long reg_value, val;

	if (!chip->apb_regs) {
		dev_err(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assigning a handshake number to it;
	 * unlock it by assigning 0x3F again.
	 */
	if (set)
		val = chan->hw_handshake_num;
	else
		val = UNUSED_CHANNEL;

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/*
	 * The 64-bit read-modify-write covers the handshake-select fields of
	 * all eight channels at once; update only this channel's field.
	 */
	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}
/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli->ctl_hi = cpu_to_le32(val);
}
static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}
static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli->ctl_lo = cpu_to_le32(val);
}
static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for destination master if available */
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}
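
/*
 * Program one hardware LLI for a slave segment. BLOCK_TS counts transfers of
 * the programmed width rather than bytes, so block_ts = len >> width, and the
 * register itself is written as block_ts - 1 because the hardware counts
 * from zero.
 */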
static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}
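
/*
 * The largest contiguous chunk a single LLI can move is the per-channel
 * BLOCK_TS limit scaled by the transfer width: block_len = block_ts << width
 * bytes. Callers use this to decide how many segments a buffer needs.
 */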
static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}
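
/*
 * Cyclic transfers: each period is split into num_segments LLIs so that no
 * segment exceeds the block length above, and the last LLI links back to the
 * first. Illustrative numbers (not from the datasheet): a 64 KiB ring with
 * 16 KiB periods and a 10 KiB block limit needs two segments per period,
 * eight LLIs in total.
 */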
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/*
		 * Set end-of-link on every linked descriptor, so that the
		 * cyclic callback function can be triggered from interrupt
		 * context.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
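
/*
 * Slave scatter-gather: every SG entry is split into equal-sized segments
 * that fit the block limit, so the total LLI count is computed up front with
 * DIV_ROUND_UP over the whole list before any descriptor memory is
 * allocated.
 */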
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs = 0;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);

	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);

		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem,
							segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
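
/*
 * Memory-to-memory copies pick the widest transfer width that the source
 * address, destination address and remaining length all allow, then chop the
 * copy into blocks of at most max_block_ts transfers each.
 */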
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of the alignment. The source and destination
		 * widths could actually differ, but keep them the same for
		 * simplicity.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data items of
		 * xfer_width to be transferred in a DMA block transfer.
		 * The BLOCK_TS register should be set to block_ts - 1.
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);
	return NULL;
}
static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}
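
/*
 * A minimal client-side sketch of driving a slave channel through the
 * generic dmaengine API (names and values are illustrative only;
 * fifo_phys_addr is a hypothetical device FIFO address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */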
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_hw_desc *desc)
{
	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli->sar),
		le64_to_cpu(desc->lli->dar),
		le64_to_cpu(desc->lli->llp),
		le32_to_cpu(desc->lli->block_ts_lo),
		le32_to_cpu(desc->lli->ctl_hi),
		le32_to_cpu(desc->lli->ctl_lo));
}
static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);

	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);

		/* Submit queued descriptors after processing the completed ones */
		axi_chan_start_first_queued(chan);
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;

	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			 axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}
static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 10000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}
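
/*
 * Pausing sets the per-channel suspend bit (its register location depends on
 * the register map) and then polls the SUSPENDED interrupt status for up to
 * 20 iterations, a few microseconds apart, before giving up with -EAGAIN.
 */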
static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u32 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->chip->dw->hdata->reg_map_8_channels) {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
			BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
	}

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}
/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u32 val;

	if (chan->chip->dw->hdata->reg_map_8_channels) {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
	}

	chan->is_paused = false;
}
static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}
static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}
static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}
static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}
static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct dw_axi_dma *dw = ofdma->of_dma_data;
	struct axi_dma_chan *chan;
	struct dma_chan *dchan;

	dchan = dma_get_any_slave_channel(&dw->dma);
	if (!dchan)
		return NULL;

	chan = dchan_to_axi_dma_chan(dchan);
	chan->hw_handshake_num = dma_spec->args[0];
	return dchan;
}
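
/*
 * The properties parsed below map to device-tree bindings roughly like the
 * following (values are illustrative, not taken from a real board file;
 * snps,data-width is encoded as log2 of the bus width in bytes):
 *
 *	dmac: dma-controller@80000 {
 *		compatible = "snps,axi-dma-1.01a";
 *		dma-channels = <4>;
 *		snps,dma-masters = <2>;
 *		snps,data-width = <3>;
 *		snps,block-size = <4096 4096 4096 4096>;
 *		snps,priority = <0 1 2 3>;
 *		snps,axi-max-burst-len = <16>;
 *	};
 */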
static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;
	if (tmp <= DMA_REG_MAP_CH_REF)
		chip->dw->hdata->reg_map_8_channels = true;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority values must be programmed within the [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* snps,axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp;
	}

	return 0;
}
static int dw_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct axi_dma_chip *chip;
	struct resource *mem;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(chip->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.chancnt = hdata->nr_channels;
	dw->dma.max_burst = hdata->axi_rw_burst_len;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
	dw->dma.device_synchronize = dw_axi_dma_synchronize;
	dw->dma.device_config = dw_axi_dma_chan_slave_config;
	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;

	/*
	 * The Synopsys DesignWare AxiDMA datasheet specifies a maximum of
	 * 1024 blocks per transfer. The device register width is 4 bytes,
	 * so set the constraint to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need the
	 * driver to work also without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	/* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}
static int dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable the clocks before accessing registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	devm_free_irq(chip->dev, chip->irq, chip);

	of_dma_controller_free(chip->dev->of_node);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	return 0;
}
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,axi-dma-1.01a" },
	{ .compatible = "intel,kmb-axi-dma" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name	= KBUILD_MODNAME,
		.of_match_table = dw_dma_of_id_table,
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");