/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 shadow regions */
#define EDMA_PARM	0x4000	/* PaRAM entries */
#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)	((x) & 0x7) /* bits 0-2 */
#define GET_NUM_QDMACH(x)	(((x) & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x)	(((x) & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	(((x) & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		(((x) & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)
/* CCSTAT register */
#define EDMA_CCSTAT_ACTV	BIT(4)

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
/*
 * 64bit array registers are split into two 32bit registers:
 * reg0: channel/event 0-31
 * reg1: channel/event 32-63
 *
 * bit 5 of the channel number selects the array index (0/1)
 * bits 0-4 (0x1f) give the bit offset within the register
 */
#define EDMA_REG_ARRAY_INDEX(channel)	((channel) >> 5)
#define EDMA_CHANNEL_BIT(channel)	(BIT((channel) & 0x1f))
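
/*
 * Worked example (editor's illustration): for channel 40,
 * EDMA_REG_ARRAY_INDEX(40) = 40 >> 5 = 1 (the high register) and
 * EDMA_CHANNEL_BIT(40) = BIT(40 & 0x1f) = BIT(8), so enabling its event
 * sets bit 8 of the second (channels 32-63) register in the array.
 */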
/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)
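
/*
 * Editor's illustration of how these bits are composed by the prep
 * callbacks below (see edma_config_pset() and edma_prep_slave_sg()):
 *
 *	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
 *	param->opt |= TCINTEN;			// completion interrupt
 *	param->opt |= TCINTEN | TCCMODE;	// early/intermediate notify
 *
 * i.e. the transfer-complete code is routed to the channel's own TCC,
 * with TCCMODE/ITCINTEN opted in for intermediate notification.
 */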
struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	bool				polled;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 *   so far to cover accounting. This is updated directly to processed
	 *   during edma_callback and is always <= processed, because processed
	 *   refers to the number of transfers pending (programmed to the EDMA
	 *   controller), whereas processed_stat tracks the number of transfers
	 *   accounted for so far.
	 *
	 * - residue: the number of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: the residue in bytes of data we have covered
	 *   so far for accounting. This is updated directly to residue
	 *   during callbacks to keep it current.
	 *
	 * - sg_len: tracks the length of the current intermediate transfer,
	 *   this is required to update the residue during intermediate transfer
	 *   completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[];
};
struct edma_tc {
	struct device_node		*node;
	u16				id;
};

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	struct edma_tc			*tc;
	int				ch_num;
	bool				alloced;
	bool				hw_triggered;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;
	bool				legacy_mode;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_qchannels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	bool				chmap_exist;
	enum dma_event_q		default_queue;

	unsigned int			ccint;
	unsigned int			ccerrint;

	/*
	 * The slot_inuse bit for each PaRAM slot is clear unless the slot is
	 * in use by Linux or if it is allocated to be used by DSP.
	 */
	unsigned long			*slot_inuse;

	struct dma_device		dma_slave;
	struct dma_device		*dma_memcpy;
	struct edma_chan		*slave_chans;
	struct edma_tc			*tc_list;
	int				dummy_slot;
};
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

#define EDMA_BINDING_LEGACY	0
#define EDMA_BINDING_TPCC	1
static const u32 edma_binding_type[] = {
	[EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
	[EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
};
static const struct of_device_id edma_of_ids[] = {
	{
		.compatible = "ti,edma3",
		.data = &edma_binding_type[EDMA_BINDING_LEGACY],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &edma_binding_type[EDMA_BINDING_TPCC],
	},
	{}
};
MODULE_DEVICE_TABLE(of, edma_of_ids);

static const struct of_device_id edma_tptc_of_ids[] = {
	{ .compatible = "ti,edma3-tptc", },
	{}
};
MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
					   int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_param_write(struct edma_cc *ecc, int offset,
				    int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_param_modify(struct edma_cc *ecc, int offset,
				     int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
				  unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
				 unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
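
/*
 * Editor's note on the address math above: a PaRAM field access resolves
 * to base + 0x4000 + (param_no * 32) + field offset. For example,
 * edma_param_write(ecc, PARM_LINK_BCNTRLD, 5, v) writes to
 * 0x4000 + 5 * 0x20 + 0x14 = 0x40b4 relative to the CC base.
 */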
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}
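
/*
 * Editor's illustration: each event queue owns a 4-bit field in QUEPRI,
 * so assigning priority 7 to queue 1 clears bits 4-6 and writes 7 there,
 * leaving the other queues' priority fields untouched.
 */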
static void edma_set_chmap(struct edma_chan *echan, int slot)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (ecc->chmap_exist) {
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
	}
}
static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
		edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
	}
}
/* paRAM slot management functions */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}

static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
			  struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return -EINVAL;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);

	return 0;
}
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot >= 0) {
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry paRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;
	}

	if (slot < 0) {
		if (ecc->chmap_exist)
			slot = 0;
		else
			slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots,
						  slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->slot_inuse))
				break;
		}
	} else if (slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}
static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
}
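
/*
 * Editor's sketch of the slot lifecycle as used below (illustrative,
 * not additional driver code): a channel allocates its entry slot plus
 * one scratch slot per SG element, fills each with edma_write_slot(),
 * chains them with edma_link(), and hands them back through
 * edma_free_slot() when the channel is released.
 */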
/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			  PARM_OFFSET(to));
}
/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}
/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	if (!echan->hw_triggered) {
		/* EDMA channels without event association */
		dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
			edma_shadow0_read_array(ecc, SH_ESR, idx));
		edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
	} else {
		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", idx,
			edma_shadow0_read_array(ecc, SH_ER, idx));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
		edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
		edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
		dev_dbg(ecc->dev, "EER%d %08x\n", idx,
			edma_shadow0_read_array(ecc, SH_EER, idx));
	}
}
static void edma_stop(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);

	dev_dbg(ecc->dev, "EER%d %08x\n", idx,
		edma_shadow0_read_array(ecc, SH_EER, idx));

	/*
	 * REVISIT: consider guarding against inappropriate event
	 * chaining by overwriting with dummy_paramset.
	 */
}
/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	edma_shadow0_write_array(echan->ecc, SH_EECR,
				 EDMA_REG_ARRAY_INDEX(channel),
				 EDMA_CHANNEL_BIT(channel));
}

/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	edma_shadow0_write_array(echan->ecc, SH_EESR,
				 EDMA_REG_ARRAY_INDEX(channel),
				 EDMA_CHANNEL_BIT(channel));
}
static void edma_trigger_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);

	dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
		edma_shadow0_read_array(ecc, SH_ESR, idx));
}
static void edma_clean_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
		edma_read_array(ecc, EDMA_EMR, idx));
	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
	/* Clear the corresponding EMR bits */
	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
	/* Clear any SER */
	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}
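
/*
 * Editor's note: per the eDMA3 CC register description, the CCERRCLR
 * bits written above correspond to TCCERR (bit 16) and the event queue
 * threshold errors QTHRXCD1/QTHRXCD0 (bits 1 and 0).
 */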
/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
				       enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int bit = (channel & 0x7) * 4;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
			  eventq_no << bit);
}
static int edma_alloc_channel(struct edma_chan *echan,
			      enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
		       EDMA_CHANNEL_BIT(channel));

	/* ensure no events are pending */
	edma_stop(echan);

	edma_setup_interrupt(echan, true);

	edma_assign_channel_eventq(echan, eventq_no);

	return 0;
}
static void edma_free_channel(struct edma_chan *echan)
{
	/* ensure no events are pending */
	edma_stop(echan);
	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(echan, false);
}
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "pset[%d]: chnum %d, slot %d, opt %08x, src %08x, dst %08x, abcnt %08x, ccnt %08x, bidx %08x, cidx %08x, lkrld %08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, set up a
	 * link to the dummy slot; this results in all future events being
	 * absorbed, which is OK because we're done. Cyclic descriptors
	 * instead wrap back to the second slot.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan);
	}
}
static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_stop() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan);
		/* Move the cyclic channel back to default queue */
		if (!echan->tc && echan->edesc->cyclic)
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);

		vchan_terminate_vdesc(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static void edma_synchronize(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	vchan_synchronize(&echan->vchan);
}
static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (cfg->src_maxburst > chan->device->max_burst ||
	    cfg->dst_maxburst > chan->device->max_burst)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan);
	return 0;
}
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and setup.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of acnt, how much to send
 * @acnt: The transfer element size (the device word width, in bytes)
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    unsigned int acnt, unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: in an A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case the approach adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K-1. This
		 * is assured as bcntrld = 0xffff at the end of this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
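
		/*
		 * Worked example (editor's illustration): acnt = 2 and
		 * dma_length = 400000 gives 200000 elements, so
		 * ccnt = 200000 / 65535 = 3 and
		 * bcnt = 200000 - 3 * 65535 = 3395; bcnt != 0, so ccnt
		 * becomes 4: one short first frame, then three full
		 * 65535-element frames reloaded via bcntrld.
		 */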
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = src_addr;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * Only time when (bcntrld) auto reload is required is for
	 * A-sync case, and in this case, a requirement of reload value
	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
	 * and then later will be populated by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}
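
/*
 * Editor's usage sketch (illustrative values, not from the original
 * source): an 8-bit UART TX with src_maxburst = 1 lands in the A-sync
 * branch with acnt = 1, so a 4 KiB buffer programs bcnt = 4096,
 * ccnt = 1, cidx = 1, and SYNCDIM stays clear.
 */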
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		if (i == sg_len - 1)
			/* Enable completion interrupt */
			edesc->pset[i].param.opt |= TCINTEN;
		else if (!((i + 1) % MAX_NR_SG))
			/*
			 * Enable early completion interrupt for the
			 * intermediate set. In this case the driver will be
			 * notified when the paRAM set is submitted to TC. This
			 * will allow more time to set up the next set of slots.
			 */
			edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret, nslots;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned int width, pset_len, array_size;

	if (unlikely(!echan || !len))
		return NULL;

	/* Align the array size (acnt block) with the transfer properties */
	switch (__ffs((src | dest | len))) {
	case 0:
		array_size = SZ_32K - 1;
		break;
	case 1:
		array_size = SZ_32K - 2;
		break;
	default:
		array_size = SZ_32K - 4;
		break;
	}

	if (len < SZ_64K) {
		/*
		 * Transfer size less than 64K can be handled with one paRAM
		 * slot and with one burst.
		 * ACNT = length
		 */
		width = len;
		pset_len = len;
		nslots = 1;
	} else {
		/*
		 * Transfer size bigger than 64K will be handled with a maximum
		 * of two paRAM slots.
		 * slot1: (full_length / 32767) times 32767-byte bursts.
		 *	  ACNT = 32767, length1: (full_length / 32767) * 32767
		 * slot2: the remaining amount of data after slot1.
		 *	  ACNT = full_length - length1, length2 = ACNT
		 *
		 * When the full_length is a multiple of 32767 one slot can be
		 * used to complete the transfer.
		 */
		width = array_size;
		pset_len = rounddown(len, width);
		/* One slot is enough for lengths multiple of (SZ_32K - 1) */
		if (unlikely(pset_len == len))
			nslots = 1;
		else
			nslots = 2;
	}
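
	/*
	 * Worked example (editor's illustration): len = 70000 with 4-byte
	 * aligned src/dest/len gives array_size = SZ_32K - 4 = 32764.
	 * Then width = 32764 and pset_len = rounddown(70000, 32764) = 65528,
	 * so slot 0 moves two 32764-byte arrays and slot 1 mops up the
	 * remaining 4472 bytes.
	 */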
	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = len;
	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, pset_len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	edesc->pset[0].param.opt |= ITCCHEN;
	if (nslots == 1) {
		/* Enable transfer complete interrupt if requested */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[0].param.opt |= TCINTEN;
	} else {
		/* Enable transfer complete chaining for the first slot */
		edesc->pset[0].param.opt |= TCCHEN;

		if (echan->slot[1] < 0) {
			echan->slot[1] = edma_alloc_slot(echan->ecc,
							 EDMA_SLOT_ANY);
			if (echan->slot[1] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
		dest += pset_len;
		src += pset_len;
		pset_len = width = len % array_size;

		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
				       width, pset_len, DMA_MEM_TO_MEM);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->pset[1].param.opt |= ITCCHEN;
		/* Enable transfer complete interrupt if requested */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[1].param.opt |= TCINTEN;
	}

	if (!(tx_flags & DMA_PREP_INTERRUPT))
		edesc->polled = true;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	bool use_intermediate = false;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG) {
		/*
		 * If the burst and period sizes are the same, we can put
		 * the full buffer into a single period and activate
		 * intermediate interrupts. This will produce interrupts
		 * after each burst, which is also after each desired period.
		 */
		if (burst == period_len) {
			period_len = buf_len;
			nslots = 2;
			use_intermediate = true;
		} else {
			return NULL;
		}
	}
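
	/*
	 * Editor's illustration: a 64 KiB audio ring of 128 periods of
	 * 512 bytes would need 129 slots, well over MAX_NR_SG; when
	 * burst == period_len, the ring collapses to one full-buffer
	 * period whose per-burst ITCINTEN interrupts still fire once per
	 * original period.
	 */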
	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		/* The last PaRAM set copies the parameters of the first one */
		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			 "pset[%d]: chnum %d, slot %d, opt %08x, src %08x, dst %08x, abcnt %08x, ccnt %08x, bidx %08x, cidx %08x, lkrld %08x\n",
			 i, echan->ch_num, echan->slot[i],
			 edesc->pset[i].param.opt,
			 edesc->pset[i].param.src,
			 edesc->pset[i].param.dst,
			 edesc->pset[i].param.a_b_cnt,
			 edesc->pset[i].param.ccnt,
			 edesc->pset[i].param.src_dst_bidx,
			 edesc->pset[i].param.src_dst_cidx,
			 edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT) {
			edesc->pset[i].param.opt |= TCINTEN;

			/* Also enable intermediate interrupts if necessary */
			if (use_intermediate)
				edesc->pset[i].param.opt |= ITCINTEN;
		}
	}

	/* Place the cyclic channel to highest priority queue */
	if (!echan->tc)
		edma_assign_channel_eventq(echan, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
static void edma_completion_handler(struct edma_chan *echan)
{
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;

	spin_lock(&echan->vchan.lock);
	edesc = echan->edesc;
	if (edesc) {
		if (edesc->cyclic) {
			vchan_cyclic_callback(&edesc->vdesc);
			spin_unlock(&echan->vchan.lock);
			return;
		} else if (edesc->processed == edesc->pset_nr) {
			edesc->residue = 0;
			edma_stop(echan);
			vchan_cookie_complete(&edesc->vdesc);
			echan->edesc = NULL;

			dev_dbg(dev, "Transfer completed on channel %d\n",
				echan->ch_num);
		} else {
			dev_dbg(dev, "Sub transfer completed on channel %d\n",
				echan->ch_num);

			edma_pause(echan);

			/* Update statistics for tx_status */
			edesc->residue -= edesc->sg_len;
			edesc->residue_stat = edesc->residue;
			edesc->processed_stat = edesc->processed;
		}
		edma_execute(echan);
	}

	spin_unlock(&echan->vchan.lock);
}
/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	u32 sh_ier, sh_ipr, bank;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot = __ffs(sh_ipr);

		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			u32 channel = (bank << 5) | slot;

			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
static void edma_error_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edmacc_param p;
	int err;

	if (!echan->edesc)
		return;

	spin_lock(&echan->vchan.lock);

	err = edma_read_slot(ecc, echan->slot[0], &p);

	/*
	 * Issue later based on missed flag which will be sure
	 * to happen as:
	 * (1) we finished transmitting an intermediate slot and
	 *     edma_execute is coming up.
	 * (2) or we finished current transfer and issue will
	 *     call edma_execute.
	 *
	 * Important note: issuing can be dangerous here and
	 * lead to some nasty recursion when we are in a NULL
	 * slot. So we avoid doing so and set the missed flag.
	 */
	if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
		dev_dbg(dev, "Error on null slot, setting miss\n");
		echan->missed = 1;
	} else {
		/*
		 * The slot is already programmed but the event got
		 * missed, so it's safe to issue it here.
		 */
		dev_dbg(dev, "Missed event, TRIGGERING\n");
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
	}

	spin_unlock(&echan->vchan.lock);
}
static inline bool edma_error_pending(struct edma_cc *ecc)
{
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
		return true;

	return false;
}
/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i, j;
	unsigned int cnt = 0;
	unsigned int val;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if (!edma_error_pending(ecc)) {
		/*
		 * The registers indicate no pending error event but the irq
		 * handler has been called.
		 * Ask eDMA to re-evaluate the error registers.
		 */
		dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
			__func__);
		edma_write(ecc, EDMA_EEVAL, 1);
		return IRQ_NONE;
	}

	while (1) {
		/* Event missed register(s) */
		for (j = 0; j < 2; j++) {
			unsigned long emr;

			val = edma_read_array(ecc, EDMA_EMR, j);
			if (!val)
				continue;

			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
			emr = val;
			for (i = find_next_bit(&emr, 32, 0); i < 32;
			     i = find_next_bit(&emr, 32, i + 1)) {
				int k = (j << 5) + i;

				/* Clear the corresponding EMR bits */
				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
				/* Clear any SER */
				edma_shadow0_write_array(ecc, SH_SECR, j,
							 BIT(i));
				edma_error_handler(&ecc->slave_chans[k]);
			}
		}

		val = edma_read(ecc, EDMA_QEMR);
		if (val) {
			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_QEMCR, val);
			edma_shadow0_write(ecc, SH_QSECR, val);
		}

		val = edma_read(ecc, EDMA_CCERR);
		if (val) {
			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_CCERRCLR, val);
		}

		if (!edma_error_pending(ecc))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = ecc->dev;
	enum dma_event_q eventq_no = EVENTQ_DEFAULT;
	int ret;

	if (echan->tc) {
		eventq_no = echan->tc->id;
	} else if (ecc->tc_list) {
		/* memcpy channel */
		echan->tc = &ecc->tc_list[ecc->info->default_queue];
		eventq_no = echan->tc->id;
	}

	ret = edma_alloc_channel(echan, eventq_no);
	if (ret)
		return ret;

	echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
	if (echan->slot[0] < 0) {
		dev_err(dev, "Entry slot allocation failed for channel %u\n",
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = echan->slot[0];
		goto err_slot;
	}

	/* Set up channel -> slot mapping for the entry slot */
	edma_set_chmap(echan, echan->slot[0]);
	echan->alloced = true;

	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
		echan->hw_triggered ? "HW" : "SW");

	return 0;

err_slot:
	edma_free_channel(echan);
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = echan->ecc->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Set entry slot to the dummy slot */
	edma_set_chmap(echan, echan->ecc->dummy_slot);

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan);
		echan->alloced = false;
	}

	echan->tc = NULL;
	echan->hw_triggered = false;

	dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
/*
 * This limit exists to avoid a possible infinite loop when waiting for proof
 * that a particular transfer is completed. This limit can be hit if there
 * are large bursts to/from slow devices or the CPU is never able to catch
 * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
 * RX-FIFO, as many as 55 loops have been seen.
 */
#define EDMA_MAX_TR_WAIT_LOOPS 1000
static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
	struct edma_chan *echan = edesc->echan;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos, pos_old;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);
	int event_reg;
	int i;

	/*
	 * We always read the dst/src position from the first RamPar
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(echan->ecc, echan->slot[0], dst);

	/*
	 * "pos" may represent a transfer request that is still being
	 * processed by the EDMACC or EDMATC. We will busy wait until
	 * any one of the situations occurs:
	 * 1. an event is pending for the channel
	 * 2. the position got updated
	 * 3. we hit the loop limit
	 */
	if (is_slave_direction(edesc->direction))
		event_reg = SH_ER;
	else
		event_reg = SH_ESR;

	pos_old = pos;
	while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
		pos = edma_get_position(echan->ecc, echan->slot[0], dst);
		if (pos != pos_old)
			break;

		if (!--loop_count) {
			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
					    "%s: timeout waiting for PaRAM update\n",
					    __func__);
			break;
		}
	}

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * If the position is 0, then EDMA loaded the closing dummy slot, the
	 * transfer is completed
	 */
	if (!pos)
		return 0;
	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct dma_tx_state txstate_tmp;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_COMPLETE)
		return ret;

	/* Provide a dummy dma_tx_state for completion checking */
	if (!txstate)
		txstate = &txstate_tmp;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		txstate->residue = edma_residue(echan->edesc);
	} else {
		struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
							      cookie);
		if (vdesc)
			txstate->residue = to_edma_desc(&vdesc->tx)->residue;
		else
			txstate->residue = 0;
	}

	/*
	 * Mark the cookie completed if the residue is 0 for non cyclic
	 * transfers
	 */
	if (ret != DMA_COMPLETE && !txstate->residue &&
	    echan->edesc && echan->edesc->polled &&
	    echan->edesc->vdesc.tx.cookie == cookie) {
		edma_stop(echan);
		vchan_cookie_complete(&echan->edesc->vdesc);
		echan->edesc = NULL;
		edma_execute(echan);
		ret = DMA_COMPLETE;
	}

	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
{
	if (!memcpy_channels)
		return false;
	while (*memcpy_channels != -1) {
		if (*memcpy_channels == ch_num)
			return true;
		memcpy_channels++;
	}
	return false;
}

#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
{
	struct dma_device *s_ddev = &ecc->dma_slave;
	struct dma_device *m_ddev = NULL;
	s32 *memcpy_channels = ecc->info->memcpy_channels;
	int i, j;

	dma_cap_zero(s_ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
	if (ecc->legacy_mode && !memcpy_channels) {
		dev_warn(ecc->dev,
			 "Legacy memcpy is enabled, things might not work\n");

		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
	}

	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
	s_ddev->device_free_chan_resources = edma_free_chan_resources;
	s_ddev->device_issue_pending = edma_issue_pending;
	s_ddev->device_tx_status = edma_tx_status;
	s_ddev->device_config = edma_slave_config;
	s_ddev->device_pause = edma_dma_pause;
	s_ddev->device_resume = edma_dma_resume;
	s_ddev->device_terminate_all = edma_terminate_all;
	s_ddev->device_synchronize = edma_synchronize;

	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */

	s_ddev->dev = ecc->dev;
	INIT_LIST_HEAD(&s_ddev->channels);

	if (memcpy_channels) {
		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
		if (!m_ddev) {
			dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
			memcpy_channels = NULL;
			goto ch_setup;
		}
		ecc->dma_memcpy = m_ddev;

		dma_cap_zero(m_ddev->cap_mask);
		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);

		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
		m_ddev->device_free_chan_resources = edma_free_chan_resources;
		m_ddev->device_issue_pending = edma_issue_pending;
		m_ddev->device_tx_status = edma_tx_status;
		m_ddev->device_config = edma_slave_config;
		m_ddev->device_pause = edma_dma_pause;
		m_ddev->device_resume = edma_dma_resume;
		m_ddev->device_terminate_all = edma_terminate_all;
		m_ddev->device_synchronize = edma_synchronize;

		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

		m_ddev->dev = ecc->dev;
		INIT_LIST_HEAD(&m_ddev->channels);
	} else if (!ecc->legacy_mode) {
		dev_info(ecc->dev, "memcpy is disabled\n");
	}

ch_setup:
	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &ecc->slave_chans[i];

		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
			vchan_init(&echan->vchan, m_ddev);
		else
			vchan_init(&echan->vchan, s_ddev);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_QDMACH(cccfg);
	ecc->num_qchannels = value * 2;

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");

	/* Nothing needs to be done if queue priority is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
					  sizeof(*queue_priority_map),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}
#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return ret;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
					 nelm);
	if (ret)
		return ret;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}
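
	/*
	 * Editor's illustration: for a crossbar event pair {10, 63} the
	 * mux field lives at byte offset 63 & ~3 = 60 with
	 * shift = (63 & 3) * 8 = 24, so event 10 is written into the top
	 * byte of the 32-bit mux register at xbar + 60.
	 */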
	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;

	return 0;
}
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	struct edma_soc_info *info;
	struct property *prop;
	int sz, ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	if (legacy_mode) {
		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
					&sz);
		if (prop) {
			ret = edma_xbar_event_map(dev, info, sz);
			if (ret)
				return ERR_PTR(ret);
		}
		return info;
	}

	/* Get the list of channels allocated to be used for memcpy */
	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
	if (prop) {
		const char pname[] = "ti,edma-memcpy-channels";
		size_t nelm = sz / sizeof(s32);
		s32 *memcpy_ch;

		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
					 GFP_KERNEL);
		if (!memcpy_ch)
			return ERR_PTR(-ENOMEM);

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)memcpy_ch, nelm);
		if (ret)
			return ERR_PTR(ret);

		memcpy_ch[nelm] = -1;
		info->memcpy_channels = memcpy_ch;
	}

	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
				&sz);
	if (prop) {
		const char pname[] = "ti,edma-reserved-slot-ranges";
		u32 (*tmp)[2];
		s16 (*rsv_slots)[2];
		size_t nelm = sz / sizeof(*tmp);
		struct edma_rsv_info *rsv_info;
		int i;

		if (!nelm)
			return info;

		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return ERR_PTR(-ENOMEM);

		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
		if (!rsv_info) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
					 GFP_KERNEL);
		if (!rsv_slots) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)tmp, nelm * 2);
		if (ret) {
			kfree(tmp);
			return ERR_PTR(ret);
		}

		for (i = 0; i < nelm; i++) {
			rsv_slots[i][0] = tmp[i][0];
			rsv_slots[i][1] = tmp[i][1];
		}
		rsv_slots[nelm][0] = -1;
		rsv_slots[nelm][1] = -1;

		info->rsv = rsv_info;
		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;

		kfree(tmp);
	}

	return info;
}
static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct edma_cc *ecc = ofdma->of_dma_data;
	struct dma_chan *chan = NULL;
	struct edma_chan *echan;
	int i;

	if (!ecc || dma_spec->args_count < 1)
		return NULL;

	for (i = 0; i < ecc->num_channels; i++) {
		echan = &ecc->slave_chans[i];
		if (echan->ch_num == dma_spec->args[0]) {
			chan = &echan->vchan.chan;
			break;
		}
	}

	if (!chan)
		return NULL;

	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
		goto out;

	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
	    dma_spec->args[1] < echan->ecc->num_tc) {
		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
		goto out;
	}

	return NULL;
out:
	/* The channel is going to be used as HW synchronized */
	echan->hw_triggered = true;
	return dma_get_slave_channel(chan);
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	return ERR_PTR(-EINVAL);
}

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	return NULL;
}
#endif

static bool edma_filter_fn(struct dma_chan *chan, void *param);
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	*info = pdev->dev.platform_data;
	s8			(*queue_priority_mapping)[2];
	int			i, off;
	const s16		(*rsv_slots)[2];
	const s16		(*xbar_chans)[2];
	int			irq;
	char			*irq_name;
	struct resource		*mem;
	struct device_node	*node = pdev->dev.of_node;
	struct device		*dev = &pdev->dev;
	struct edma_cc		*ecc;
	bool			legacy_mode = true;
	int			ret;

	if (node) {
		const struct of_device_id *match;

		match = of_match_node(edma_of_ids, node);
		if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
			legacy_mode = false;

		info = edma_setup_info_from_dt(dev, legacy_mode);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		pm_runtime_disable(dev);
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc)
		return -ENOMEM;

	ecc->dev = dev;
	ecc->id = pdev->id;
	ecc->legacy_mode = legacy_mode;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		return ret;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);
	if (!ecc->slave_chans)
		return -ENOMEM;

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slot_inuse)
		return -ENOMEM;

	ecc->default_queue = info->default_queue;

	if (info->rsv) {
		/* Set the reserved slots in inuse list */
		rsv_slots = info->rsv->rsv_slots;
		if (rsv_slots) {
			for (i = 0; rsv_slots[i][0] != -1; i++)
				bitmap_set(ecc->slot_inuse, rsv_slots[i][0],
					   rsv_slots[i][1]);
		}
	}

	for (i = 0; i < ecc->num_slots; i++) {
		/* Reset only unused - not reserved - paRAM slots */
		if (!test_bit(i, ecc->slot_inuse))
			edma_write_slot(ecc, i, &dummy_paramset);
	}

	/* Clear the xbar mapped channels in unused list */
	xbar_chans = info->xbar_chans;
	if (xbar_chans) {
		for (i = 0; xbar_chans[i][1] != -1; i++)
			off = xbar_chans[i][1];
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
		ecc->ccint = irq;
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
		ecc->ccerrint = irq;
	}

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	queue_priority_mapping = info->queue_priority_mapping;

	if (!ecc->legacy_mode) {
		int lowest_priority = 0;
		struct of_phandle_args tc_args;

		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
					    sizeof(*ecc->tc_list), GFP_KERNEL);
		if (!ecc->tc_list) {
			ret = -ENOMEM;
			goto err_reg1;
		}

		for (i = 0;; i++) {
			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
							       1, i, &tc_args);
			if (ret || i == ecc->num_tc)
				break;

			ecc->tc_list[i].node = tc_args.np;
			ecc->tc_list[i].id = i;
			queue_priority_mapping[i][1] = tc_args.args[0];
			if (queue_priority_mapping[i][1] > lowest_priority) {
				lowest_priority = queue_priority_mapping[i][1];
				info->default_queue = i;
			}
		}
	}

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
	edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
	edma_write_array(ecc, EDMA_QRAE, 0, 0x0);

	ecc->info = info;

	/* Init the dma device and channels */
	edma_dma_init(ecc, legacy_mode);

	for (i = 0; i < ecc->num_channels; i++) {
		/* Assign all channels to the default queue */
		edma_assign_channel_eventq(&ecc->slave_chans[i],
					   info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
	}

	ecc->dma_slave.filter.map = info->slave_map;
	ecc->dma_slave.filter.mapcnt = info->slavecnt;
	ecc->dma_slave.filter.fn = edma_filter_fn;

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret) {
		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
		goto err_reg1;
	}

	if (ecc->dma_memcpy) {
		ret = dma_async_device_register(ecc->dma_memcpy);
		if (ret) {
			dev_err(dev, "memcpy ddev registration failed (%d)\n",
				ret);
			dma_async_device_unregister(&ecc->dma_slave);
			goto err_reg1;
		}
	}

	if (node)
		of_dma_controller_register(node, of_edma_xlate, ecc);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
	return ret;
}
static void edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct edma_chan *echan, *_echan;

	list_for_each_entry_safe(echan, _echan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&echan->vchan.chan.device_node);
		tasklet_kill(&echan->vchan.task);
	}
}
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	devm_free_irq(dev, ecc->ccint, ecc);
	devm_free_irq(dev, ecc->ccerrint, ecc);

	edma_cleanup_vchan(&ecc->dma_slave);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	if (ecc->dma_memcpy)
		dma_async_device_unregister(ecc->dma_memcpy);
	edma_free_slot(ecc, ecc->dummy_slot);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int edma_pm_suspend(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced)
			edma_setup_interrupt(&echan[i], false);
	}

	return 0;
}
static int edma_pm_resume(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;
	s8 (*queue_priority_mapping)[2];

	/* re-initialize dummy slot to dummy param set */
	edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);

	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced) {
			/* ensure access through shadow region 0 */
			edma_or_array2(ecc, EDMA_DRAE, 0,
				       EDMA_REG_ARRAY_INDEX(i),
				       EDMA_CHANNEL_BIT(i));

			edma_setup_interrupt(&echan[i], true);

			/* Set up channel -> slot mapping for the entry slot */
			edma_set_chmap(&echan[i], echan[i].slot[0]);
		}
	}

	return 0;
}
#endif
static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
};

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};
static int edma_tptc_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	return pm_runtime_get_sync(&pdev->dev);
}

static struct platform_driver edma_tptc_driver = {
	.probe		= edma_tptc_probe,
	.driver = {
		.name	= "edma3-tptc",
		.of_match_table = edma_tptc_of_ids,
	},
};
static bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	bool match = false;

	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;

		if (ch_req == echan->ch_num) {
			/* The channel is going to be used as HW synchronized */
			echan->hw_triggered = true;
			match = true;
		}
	}
	return match;
}
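
/*
 * Editor's usage sketch (illustrative, not part of the driver): a
 * non-DT platform would request a specific hardware channel with
 * something like
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch_num = 12;	// hypothetical event number
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 *
 * which walks the registered channels and lets edma_filter_fn() pick
 * the one whose ch_num matches, marking it HW-triggered.
 */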
static int edma_init(void)
{
	int ret;

	ret = platform_driver_register(&edma_tptc_driver);
	if (ret)
		return ret;

	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
	platform_driver_unregister(&edma_tptc_driver);
}
module_exit(edma_exit);
MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");