1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
7 #include <linux/kernel.h>
8 #include <linux/delay.h>
9 #include <linux/dmaengine.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/list.h>
16 #include <linux/platform_device.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
20 #include <linux/of_dma.h>
21 #include <linux/of_device.h>
22 #include <linux/of_irq.h>
23 #include <linux/workqueue.h>
24 #include <linux/completion.h>
25 #include <linux/soc/ti/k3-ringacc.h>
26 #include <linux/soc/ti/ti_sci_protocol.h>
27 #include <linux/soc/ti/ti_sci_inta_msi.h>
28 #include <linux/dma/ti-cppi5.h>
30 #include "../virt-dma.h"
32 #include "k3-psil-priv.h"
34 struct udma_static_tr {
35 u8 elsize; /* RPSTR0 */
36 u16 elcnt; /* RPSTR0 */
37 u16 bstcnt; /* RPSTR1 */
40 #define K3_UDMA_MAX_RFLOWS 1024
41 #define K3_UDMA_DEFAULT_RING_SIZE 16
43 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
44 #define UDMA_RFLOW_SRCTAG_NONE 0
45 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1
46 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2
47 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4
49 #define UDMA_RFLOW_DSTTAG_NONE 0
50 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1
51 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2
52 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
53 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
64 static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
70 struct k3_ring *t_ring; /* Transmit ring */
71 struct k3_ring *tc_ring; /* Transmit Completion ring */
76 struct k3_ring *fd_ring; /* Free Descriptor ring */
77 struct k3_ring *r_ring; /* Receive ring */
86 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
87 #define UDMA_FLAG_PDMA_BURST BIT(1)
89 struct udma_match_data {
91 bool enable_memcpy_support;
97 u32 level_start_idx[];
101 size_t cppi5_desc_size;
102 void *cppi5_desc_vaddr;
103 dma_addr_t cppi5_desc_paddr;
105 /* TR descriptor internal pointers */
107 struct cppi5_tr_resp_t *tr_resp_base;
110 struct udma_rx_flush {
111 struct udma_hwdesc hwdescs[2];
115 dma_addr_t buffer_paddr;
119 struct dma_device ddev;
121 void __iomem *mmrs[MMR_LAST];
122 const struct udma_match_data *match_data;
124 size_t desc_align; /* alignment to use for descriptors */
126 struct udma_tisci_rm tisci_rm;
128 struct k3_ringacc *ringacc;
130 struct work_struct purge_work;
131 struct list_head desc_to_purge;
134 struct udma_rx_flush rx_flush;
140 unsigned long *tchan_map;
141 unsigned long *rchan_map;
142 unsigned long *rflow_gp_map;
143 unsigned long *rflow_gp_map_allocated;
144 unsigned long *rflow_in_use;
146 struct udma_tchan *tchans;
147 struct udma_rchan *rchans;
148 struct udma_rflow *rflows;
150 struct udma_chan *channels;
155 struct virt_dma_desc vd;
159 enum dma_transfer_direction dir;
161 struct udma_static_tr static_tr;
165 unsigned int desc_idx; /* Only used for cyclic in packet mode */
169 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
171 unsigned int hwdesc_count;
172 struct udma_hwdesc hwdesc[];
175 enum udma_chan_state {
176 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
177 UDMA_CHAN_IS_ACTIVE, /* Normal operation */
178 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
181 struct udma_tx_drain {
182 struct delayed_work work;
187 struct udma_chan_config {
188 bool pkt_mode; /* TR or packet */
189 bool needs_epib; /* EPIB is needed for the communication or not */
190 u32 psd_size; /* size of Protocol Specific Data */
191 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
192 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
193 bool notdpkt; /* Suppress sending TDC packet */
194 int remote_thread_id;
197 enum psil_endpoint_type ep_type;
200 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
202 enum dma_transfer_direction dir;
206 struct virt_dma_chan vc;
207 struct dma_slave_config cfg;
209 struct udma_desc *desc;
210 struct udma_desc *terminated_desc;
211 struct udma_static_tr static_tr;
214 struct udma_tchan *tchan;
215 struct udma_rchan *rchan;
216 struct udma_rflow *rflow;
226 enum udma_chan_state state;
227 struct completion teardown_completed;
229 struct udma_tx_drain tx_drain;
231 u32 bcnt; /* number of bytes completed since the start of the channel */
232 u32 in_ring_cnt; /* number of descriptors in flight */
234 /* Channel configuration parameters */
235 struct udma_chan_config config;
237 /* dmapool for packet mode descriptors */
239 struct dma_pool *hdesc_pool;
244 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
246 return container_of(d, struct udma_dev, ddev);
249 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
251 return container_of(c, struct udma_chan, vc.chan);
254 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
256 return container_of(t, struct udma_desc, vd.tx);
259 /* Generic register access functions */
260 static inline u32 udma_read(void __iomem *base, int reg)
262 return readl(base + reg);
265 static inline void udma_write(void __iomem *base, int reg, u32 val)
267 writel(val, base + reg);
270 static inline void udma_update_bits(void __iomem *base, int reg,
275 orig = readl(base + reg);
280 writel(tmp, base + reg);
284 static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
288 return udma_read(tchan->reg_rt, reg);
291 static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
296 udma_write(tchan->reg_rt, reg, val);
299 static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
304 udma_update_bits(tchan->reg_rt, reg, mask, val);
308 static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
312 return udma_read(rchan->reg_rt, reg);
315 static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
320 udma_write(rchan->reg_rt, reg, val);
323 static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
328 udma_update_bits(rchan->reg_rt, reg, mask, val);
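/* PSI-L thread pairing and unpairing is routed through the TI-SCI resource manager */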
331 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
333 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
335 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
336 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
337 tisci_rm->tisci_navss_dev_id,
338 src_thread, dst_thread);
341 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
344 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
346 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
347 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
348 tisci_rm->tisci_navss_dev_id,
349 src_thread, dst_thread);
352 static void udma_reset_uchan(struct udma_chan *uc)
354 memset(&uc->config, 0, sizeof(uc->config));
355 uc->config.remote_thread_id = -1;
356 uc->state = UDMA_CHAN_IS_IDLE;
359 static void udma_dump_chan_stdata(struct udma_chan *uc)
361 struct device *dev = uc->ud->dev;
365 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
366 dev_dbg(dev, "TCHAN State data:\n");
367 for (i = 0; i < 32; i++) {
368 offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
369 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
370 udma_tchanrt_read(uc->tchan, offset));
374 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
375 dev_dbg(dev, "RCHAN State data:\n");
376 for (i = 0; i < 32; i++) {
377 offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
378 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
379 udma_rchanrt_read(uc->rchan, offset));
384 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
387 return d->hwdesc[idx].cppi5_desc_paddr;
390 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
392 return d->hwdesc[idx].cppi5_desc_vaddr;
395 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
398 struct udma_desc *d = uc->terminated_desc;
401 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
404 if (desc_paddr != paddr)
411 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
414 if (desc_paddr != paddr)
422 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
424 if (uc->use_dma_pool) {
427 for (i = 0; i < d->hwdesc_count; i++) {
428 if (!d->hwdesc[i].cppi5_desc_vaddr)
431 dma_pool_free(uc->hdesc_pool,
432 d->hwdesc[i].cppi5_desc_vaddr,
433 d->hwdesc[i].cppi5_desc_paddr);
435 d->hwdesc[i].cppi5_desc_vaddr = NULL;
437 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
438 struct udma_dev *ud = uc->ud;
440 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
441 d->hwdesc[0].cppi5_desc_vaddr,
442 d->hwdesc[0].cppi5_desc_paddr);
444 d->hwdesc[0].cppi5_desc_vaddr = NULL;
448 static void udma_purge_desc_work(struct work_struct *work)
450 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
451 struct virt_dma_desc *vd, *_vd;
455 spin_lock_irqsave(&ud->lock, flags);
456 list_splice_tail_init(&ud->desc_to_purge, &head);
457 spin_unlock_irqrestore(&ud->lock, flags);
459 list_for_each_entry_safe(vd, _vd, &head, node) {
460 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
461 struct udma_desc *d = to_udma_desc(&vd->tx);
463 udma_free_hwdesc(uc, d);
468 /* If more to purge, schedule the work again */
469 if (!list_empty(&ud->desc_to_purge))
470 schedule_work(&ud->purge_work);
473 static void udma_desc_free(struct virt_dma_desc *vd)
475 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
476 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
477 struct udma_desc *d = to_udma_desc(&vd->tx);
480 if (uc->terminated_desc == d)
481 uc->terminated_desc = NULL;
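/*
 * Descriptors backed by the dma_pool are freed right away; coherent
 * allocations are queued for the purge work so that dma_free_coherent()
 * is not called from atomic context.
 */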
483 if (uc->use_dma_pool) {
484 udma_free_hwdesc(uc, d);
489 spin_lock_irqsave(&ud->lock, flags);
490 list_add_tail(&vd->node, &ud->desc_to_purge);
491 spin_unlock_irqrestore(&ud->lock, flags);
493 schedule_work(&ud->purge_work);
496 static bool udma_is_chan_running(struct udma_chan *uc)
502 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
504 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
506 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
512 static bool udma_is_chan_paused(struct udma_chan *uc)
516 switch (uc->config.dir) {
518 val = udma_rchanrt_read(uc->rchan,
519 UDMA_RCHAN_RT_PEER_RT_EN_REG);
520 pause_mask = UDMA_PEER_RT_EN_PAUSE;
523 val = udma_tchanrt_read(uc->tchan,
524 UDMA_TCHAN_RT_PEER_RT_EN_REG);
525 pause_mask = UDMA_PEER_RT_EN_PAUSE;
528 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
529 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
535 if (val & pause_mask)
541 static void udma_sync_for_device(struct udma_chan *uc, int idx)
543 struct udma_desc *d = uc->desc;
545 if (uc->cyclic && uc->config.pkt_mode) {
546 dma_sync_single_for_device(uc->ud->dev,
547 d->hwdesc[idx].cppi5_desc_paddr,
548 d->hwdesc[idx].cppi5_desc_size,
553 for (i = 0; i < d->hwdesc_count; i++) {
554 if (!d->hwdesc[i].cppi5_desc_vaddr)
557 dma_sync_single_for_device(uc->ud->dev,
558 d->hwdesc[i].cppi5_desc_paddr,
559 d->hwdesc[i].cppi5_desc_size,
565 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
567 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
570 static int udma_push_to_ring(struct udma_chan *uc, int idx)
572 struct udma_desc *d = uc->desc;
573 struct k3_ring *ring = NULL;
577 switch (uc->config.dir) {
579 ring = uc->rflow->fd_ring;
583 ring = uc->tchan->t_ring;
589 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
591 paddr = udma_get_rx_flush_hwdesc_paddr(uc);
593 paddr = udma_curr_cppi5_desc_paddr(d, idx);
595 wmb(); /* Ensure that writes are not moved over this point */
596 udma_sync_for_device(uc, idx);
599 ret = k3_ringacc_ring_push(ring, &paddr);
606 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
608 if (uc->config.dir != DMA_DEV_TO_MEM)
611 if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
617 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
619 struct k3_ring *ring = NULL;
622 switch (uc->config.dir) {
624 ring = uc->rflow->r_ring;
628 ring = uc->tchan->tc_ring;
634 if (ring && k3_ringacc_ring_get_occ(ring)) {
635 struct udma_desc *d = NULL;
637 ret = k3_ringacc_ring_pop(ring, addr);
641 /* Teardown completion */
642 if (cppi5_desc_is_tdcm(*addr))
645 /* Check for flush descriptor */
646 if (udma_desc_is_rx_flush(uc, *addr))
649 d = udma_udma_desc_from_paddr(uc, *addr);
652 dma_sync_single_for_cpu(uc->ud->dev, *addr,
653 d->hwdesc[0].cppi5_desc_size,
655 rmb(); /* Ensure that reads are not moved before this point */
664 static void udma_reset_rings(struct udma_chan *uc)
666 struct k3_ring *ring1 = NULL;
667 struct k3_ring *ring2 = NULL;
669 switch (uc->config.dir) {
672 ring1 = uc->rflow->fd_ring;
673 ring2 = uc->rflow->r_ring;
679 ring1 = uc->tchan->t_ring;
680 ring2 = uc->tchan->tc_ring;
688 k3_ringacc_ring_reset_dma(ring1,
689 k3_ringacc_ring_get_occ(ring1));
691 k3_ringacc_ring_reset(ring2);
693 /* make sure we are not leaking memory by a stalled descriptor */
694 if (uc->terminated_desc) {
695 udma_desc_free(&uc->terminated_desc->vd);
696 uc->terminated_desc = NULL;
702 static void udma_reset_counters(struct udma_chan *uc)
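/* Reset the channel realtime counters by writing back their current values */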
707 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
708 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
710 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
711 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
713 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
714 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
716 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
717 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
721 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
722 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
724 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
725 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
727 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
728 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
730 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
731 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
737 static int udma_reset_chan(struct udma_chan *uc, bool hard)
739 switch (uc->config.dir) {
741 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
742 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
745 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
746 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
749 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
750 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
756 /* Reset all counters */
757 udma_reset_counters(uc);
759 /* Hard reset: re-initialize the channel to fully reset it */
761 struct udma_chan_config ucc_backup;
764 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
765 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
767 /* restore the channel configuration */
768 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
769 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
774 * Setting forced teardown after a forced reset helps the channel recover
777 if (uc->config.dir == DMA_DEV_TO_MEM)
778 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
779 UDMA_CHAN_RT_CTL_EN |
780 UDMA_CHAN_RT_CTL_TDOWN |
781 UDMA_CHAN_RT_CTL_FTDOWN);
783 uc->state = UDMA_CHAN_IS_IDLE;
788 static void udma_start_desc(struct udma_chan *uc)
790 struct udma_chan_config *ucc = &uc->config;
792 if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
795 /* Push all descriptors to ring for packet mode cyclic or RX */
796 for (i = 0; i < uc->desc->sglen; i++)
797 udma_push_to_ring(uc, i);
799 udma_push_to_ring(uc, 0);
803 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
805 /* Only PDMAs have staticTR */
806 if (uc->config.ep_type == PSIL_EP_NATIVE)
809 /* Check if the staticTR configuration has changed for TX */
810 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
816 static int udma_start(struct udma_chan *uc)
818 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
827 uc->desc = to_udma_desc(&vd->tx);
829 /* Channel is already running and does not need reconfiguration */
830 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
835 /* Make sure that we clear the teardown bit, if it is set */
836 udma_reset_chan(uc, false);
838 /* Push descriptors before we start the channel */
841 switch (uc->desc->dir) {
843 /* Config remote TR */
844 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
845 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
846 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
847 const struct udma_match_data *match_data =
850 if (uc->config.enable_acc32)
851 val |= PDMA_STATIC_TR_XY_ACC32;
852 if (uc->config.enable_burst)
853 val |= PDMA_STATIC_TR_XY_BURST;
855 udma_rchanrt_write(uc->rchan,
856 UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
858 udma_rchanrt_write(uc->rchan,
859 UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
860 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
861 match_data->statictr_z_mask));
863 /* save the current staticTR configuration */
864 memcpy(&uc->static_tr, &uc->desc->static_tr,
865 sizeof(uc->static_tr));
868 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
869 UDMA_CHAN_RT_CTL_EN);
872 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
873 UDMA_PEER_RT_EN_ENABLE);
877 /* Config remote TR */
878 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
879 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
880 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
882 if (uc->config.enable_acc32)
883 val |= PDMA_STATIC_TR_XY_ACC32;
884 if (uc->config.enable_burst)
885 val |= PDMA_STATIC_TR_XY_BURST;
887 udma_tchanrt_write(uc->tchan,
888 UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
890 /* save the current staticTR configuration */
891 memcpy(&uc->static_tr, &uc->desc->static_tr,
892 sizeof(uc->static_tr));
896 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
897 UDMA_PEER_RT_EN_ENABLE);
899 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
900 UDMA_CHAN_RT_CTL_EN);
904 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
905 UDMA_CHAN_RT_CTL_EN);
906 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
907 UDMA_CHAN_RT_CTL_EN);
914 uc->state = UDMA_CHAN_IS_ACTIVE;
920 static int udma_stop(struct udma_chan *uc)
922 enum udma_chan_state old_state = uc->state;
924 uc->state = UDMA_CHAN_IS_TERMINATING;
925 reinit_completion(&uc->teardown_completed);
927 switch (uc->config.dir) {
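/*
 * Push the dedicated RX flush descriptor so the teardown can complete
 * even when no client descriptor is queued on the channel.
 */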
929 if (!uc->cyclic && !uc->desc)
930 udma_push_to_ring(uc, -1);
932 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
933 UDMA_PEER_RT_EN_ENABLE |
934 UDMA_PEER_RT_EN_TEARDOWN);
937 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
938 UDMA_PEER_RT_EN_ENABLE |
939 UDMA_PEER_RT_EN_FLUSH);
940 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
941 UDMA_CHAN_RT_CTL_EN |
942 UDMA_CHAN_RT_CTL_TDOWN);
945 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
946 UDMA_CHAN_RT_CTL_EN |
947 UDMA_CHAN_RT_CTL_TDOWN);
950 uc->state = old_state;
951 complete_all(&uc->teardown_completed);
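/* Re-queue the just-completed period's descriptor to keep the cyclic transfer going */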
958 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
960 struct udma_desc *d = uc->desc;
961 struct cppi5_host_desc_t *h_desc;
963 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
964 cppi5_hdesc_reset_to_original(h_desc);
965 udma_push_to_ring(uc, d->desc_idx);
966 d->desc_idx = (d->desc_idx + 1) % d->sglen;
969 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
971 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
973 memcpy(d->metadata, h_desc->epib, d->metadata_size);
976 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
980 /* Only TX towards PDMA is affected */
981 if (uc->config.ep_type == PSIL_EP_NATIVE ||
982 uc->config.dir != DMA_MEM_TO_DEV)
985 peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
986 bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
988 /* Transfer is incomplete, store current residue and time stamp */
989 if (peer_bcnt < bcnt) {
990 uc->tx_drain.residue = bcnt - peer_bcnt;
991 uc->tx_drain.tstamp = ktime_get();
998 static void udma_check_tx_completion(struct work_struct *work)
1000 struct udma_chan *uc = container_of(work, typeof(*uc),
1001 tx_drain.work.work);
1002 bool desc_done = true;
1005 unsigned long delay;
1009 /* Get previous residue and time stamp */
1010 residue_diff = uc->tx_drain.residue;
1011 time_diff = uc->tx_drain.tstamp;
1013 * Get current residue and time stamp or see if
1014 * transfer is complete
1016 desc_done = udma_is_desc_really_done(uc, uc->desc);
1021 * Find the time delta and residue delta w.r.t. the previous check
1024 time_diff = ktime_sub(uc->tx_drain.tstamp,
1026 residue_diff -= uc->tx_drain.residue;
1029 * Try to guess when we should check
1030 * next time by calculating the rate at
1031 * which data is being drained on the peripheral side.
1034 delay = (time_diff / residue_diff) *
1035 uc->tx_drain.residue;
1037 /* No progress, check again in 1 second */
1038 schedule_delayed_work(&uc->tx_drain.work, HZ);
1042 usleep_range(ktime_to_us(delay),
1043 ktime_to_us(delay) + 10);
1048 struct udma_desc *d = uc->desc;
1050 uc->bcnt += d->residue;
1052 vchan_cookie_complete(&d->vd);
1060 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1062 struct udma_chan *uc = data;
1063 struct udma_desc *d;
1064 unsigned long flags;
1065 dma_addr_t paddr = 0;
1067 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1070 spin_lock_irqsave(&uc->vc.lock, flags);
1072 /* Teardown completion message */
1073 if (cppi5_desc_is_tdcm(paddr)) {
1074 /* Compensate our internal pop/push counter */
1077 complete_all(&uc->teardown_completed);
1079 if (uc->terminated_desc) {
1080 udma_desc_free(&uc->terminated_desc->vd);
1081 uc->terminated_desc = NULL;
1090 d = udma_udma_desc_from_paddr(uc, paddr);
1093 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1095 if (desc_paddr != paddr) {
1096 dev_err(uc->ud->dev, "not matching descriptors!\n");
1100 if (d == uc->desc) {
1101 /* active descriptor */
1103 udma_cyclic_packet_elapsed(uc);
1104 vchan_cyclic_callback(&d->vd);
1106 if (udma_is_desc_really_done(uc, d)) {
1107 uc->bcnt += d->residue;
1109 vchan_cookie_complete(&d->vd);
1111 schedule_delayed_work(&uc->tx_drain.work,
1117 * terminated descriptor, mark the descriptor as
1118 * completed to update the channel's cookie marker
1120 dma_cookie_complete(&d->vd.tx);
1124 spin_unlock_irqrestore(&uc->vc.lock, flags);
1129 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1131 struct udma_chan *uc = data;
1132 struct udma_desc *d;
1133 unsigned long flags;
1135 spin_lock_irqsave(&uc->vc.lock, flags);
1138 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1141 vchan_cyclic_callback(&d->vd);
1143 /* TODO: figure out the real amount of data */
1144 uc->bcnt += d->residue;
1146 vchan_cookie_complete(&d->vd);
1150 spin_unlock_irqrestore(&uc->vc.lock, flags);
1156 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1158 * @from: Start the search from this flow id number
1159 * @cnt: Number of consecutive flow ids to allocate
1161 * Allocate a range of RX flow ids for future use. These flows can be requested
1162 * only by explicit flow id number. If @from is set to -1 it will try to find the
1163 * first free range. If @from is a positive value it will force allocation only
1164 * of the specified range of flows.
1166 * Returns -ENOMEM if no free range can be found,
1167 * -EEXIST if the requested range is busy,
1168 * -EINVAL if wrong input values are passed.
1169 * Returns the first flow id of the range on success.
1171 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1173 int start, tmp_from;
1174 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1178 tmp_from = ud->rchan_cnt;
1179 /* default flows can't be allocated; they are accessible only by id */
1180 if (tmp_from < ud->rchan_cnt)
1183 if (tmp_from + cnt > ud->rflow_cnt)
1186 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1189 start = bitmap_find_next_zero_area(tmp,
1192 if (start >= ud->rflow_cnt)
1195 if (from >= 0 && start != from)
1198 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1202 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1204 if (from < ud->rchan_cnt)
1206 if (from + cnt > ud->rflow_cnt)
1209 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1213 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1216 * An attempt to request an rflow by ID can be made for any rflow
1217 * that is not in use, with the assumption that the caller knows what it is doing.
1218 * TI-SCI FW will perform an additional permission check anyway, so this is safe.
1222 if (id < 0 || id >= ud->rflow_cnt)
1223 return ERR_PTR(-ENOENT);
1225 if (test_bit(id, ud->rflow_in_use))
1226 return ERR_PTR(-ENOENT);
1228 /* GP rflow has to be allocated first */
1229 if (!test_bit(id, ud->rflow_gp_map) &&
1230 !test_bit(id, ud->rflow_gp_map_allocated))
1231 return ERR_PTR(-EINVAL);
1233 dev_dbg(ud->dev, "get rflow%d\n", id);
1234 set_bit(id, ud->rflow_in_use);
1235 return &ud->rflows[id];
1238 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1240 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1241 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1245 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1246 clear_bit(rflow->id, ud->rflow_in_use);
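/*
 * UDMA_RESERVE_RESOURCE() generates __udma_reserve_tchan() and
 * __udma_reserve_rchan(): reserve a specific channel by id, or search for a
 * free one starting from the first id of the requested throughput level.
 */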
1249 #define UDMA_RESERVE_RESOURCE(res) \
1250 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1251 enum udma_tp_level tpl, \
1255 if (test_bit(id, ud->res##_map)) { \
1256 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
1257 return ERR_PTR(-ENOENT); \
1262 if (tpl >= ud->match_data->tpl_levels) \
1263 tpl = ud->match_data->tpl_levels - 1; \
1265 start = ud->match_data->level_start_idx[tpl]; \
1267 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1269 if (id == ud->res##_cnt) { \
1270 return ERR_PTR(-ENOENT); \
1274 set_bit(id, ud->res##_map); \
1275 return &ud->res##s[id]; \
1278 UDMA_RESERVE_RESOURCE(tchan);
1279 UDMA_RESERVE_RESOURCE(rchan);
1281 static int udma_get_tchan(struct udma_chan *uc)
1283 struct udma_dev *ud = uc->ud;
1286 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1287 uc->id, uc->tchan->id);
1291 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1292 if (IS_ERR(uc->tchan))
1293 return PTR_ERR(uc->tchan);
1298 static int udma_get_rchan(struct udma_chan *uc)
1300 struct udma_dev *ud = uc->ud;
1303 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1304 uc->id, uc->rchan->id);
1308 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1309 if (IS_ERR(uc->rchan))
1310 return PTR_ERR(uc->rchan);
1315 static int udma_get_chan_pair(struct udma_chan *uc)
1317 struct udma_dev *ud = uc->ud;
1318 const struct udma_match_data *match_data = ud->match_data;
1321 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1322 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1323 uc->id, uc->tchan->id);
1328 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1329 uc->id, uc->tchan->id);
1331 } else if (uc->rchan) {
1332 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1333 uc->id, uc->rchan->id);
1337 /* Can be optimized, but let's have it like this for now */
1338 end = min(ud->tchan_cnt, ud->rchan_cnt);
1339 /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1340 chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
1341 for (; chan_id < end; chan_id++) {
1342 if (!test_bit(chan_id, ud->tchan_map) &&
1343 !test_bit(chan_id, ud->rchan_map))
1350 set_bit(chan_id, ud->tchan_map);
1351 set_bit(chan_id, ud->rchan_map);
1352 uc->tchan = &ud->tchans[chan_id];
1353 uc->rchan = &ud->rchans[chan_id];
1358 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1360 struct udma_dev *ud = uc->ud;
1363 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1368 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1369 uc->id, uc->rflow->id);
1373 uc->rflow = __udma_get_rflow(ud, flow_id);
1374 if (IS_ERR(uc->rflow))
1375 return PTR_ERR(uc->rflow);
1380 static void udma_put_rchan(struct udma_chan *uc)
1382 struct udma_dev *ud = uc->ud;
1385 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1387 clear_bit(uc->rchan->id, ud->rchan_map);
1392 static void udma_put_tchan(struct udma_chan *uc)
1394 struct udma_dev *ud = uc->ud;
1397 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1399 clear_bit(uc->tchan->id, ud->tchan_map);
1404 static void udma_put_rflow(struct udma_chan *uc)
1406 struct udma_dev *ud = uc->ud;
1409 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1411 __udma_put_rflow(ud, uc->rflow);
1416 static void udma_free_tx_resources(struct udma_chan *uc)
1421 k3_ringacc_ring_free(uc->tchan->t_ring);
1422 k3_ringacc_ring_free(uc->tchan->tc_ring);
1423 uc->tchan->t_ring = NULL;
1424 uc->tchan->tc_ring = NULL;
1429 static int udma_alloc_tx_resources(struct udma_chan *uc)
1431 struct k3_ring_cfg ring_cfg;
1432 struct udma_dev *ud = uc->ud;
1435 ret = udma_get_tchan(uc);
1439 uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
1441 if (!uc->tchan->t_ring) {
1446 uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1447 if (!uc->tchan->tc_ring) {
1452 memset(&ring_cfg, 0, sizeof(ring_cfg));
1453 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1454 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1455 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1457 ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
1458 ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
1466 k3_ringacc_ring_free(uc->tchan->tc_ring);
1467 uc->tchan->tc_ring = NULL;
1469 k3_ringacc_ring_free(uc->tchan->t_ring);
1470 uc->tchan->t_ring = NULL;
1477 static void udma_free_rx_resources(struct udma_chan *uc)
1483 struct udma_rflow *rflow = uc->rflow;
1485 k3_ringacc_ring_free(rflow->fd_ring);
1486 k3_ringacc_ring_free(rflow->r_ring);
1487 rflow->fd_ring = NULL;
1488 rflow->r_ring = NULL;
1496 static int udma_alloc_rx_resources(struct udma_chan *uc)
1498 struct udma_dev *ud = uc->ud;
1499 struct k3_ring_cfg ring_cfg;
1500 struct udma_rflow *rflow;
1504 ret = udma_get_rchan(uc);
1508 /* For MEM_TO_MEM we don't need rflow or rings */
1509 if (uc->config.dir == DMA_MEM_TO_MEM)
1512 ret = udma_get_rflow(uc, uc->rchan->id);
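/*
 * The rchan's default free descriptor ring follows the tchan and echan
 * rings in the ring accelerator numbering (see fd_ring_id below).
 */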
1519 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1520 rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
1521 if (!rflow->fd_ring) {
1526 rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1527 if (!rflow->r_ring) {
1532 memset(&ring_cfg, 0, sizeof(ring_cfg));
1534 if (uc->config.pkt_mode)
1535 ring_cfg.size = SG_MAX_SEGMENTS;
1537 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1539 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1540 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1542 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1543 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1544 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1552 k3_ringacc_ring_free(rflow->r_ring);
1553 rflow->r_ring = NULL;
1555 k3_ringacc_ring_free(rflow->fd_ring);
1556 rflow->fd_ring = NULL;
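/* TI-SCI channel configuration fields programmed by this driver */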
1565 #define TISCI_TCHAN_VALID_PARAMS ( \
1566 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1567 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1568 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1569 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1570 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1571 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1572 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
1574 #define TISCI_RCHAN_VALID_PARAMS ( \
1575 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1576 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1577 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1578 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1579 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1580 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1581 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1582 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
1584 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1586 struct udma_dev *ud = uc->ud;
1587 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1588 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1589 struct udma_tchan *tchan = uc->tchan;
1590 struct udma_rchan *rchan = uc->rchan;
1593 /* Non synchronized - mem to mem type of transfer */
1594 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1595 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1596 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1598 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1599 req_tx.nav_id = tisci_rm->tisci_dev_id;
1600 req_tx.index = tchan->id;
1601 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1602 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1603 req_tx.txcq_qnum = tc_ring;
1605 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1607 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1611 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1612 req_rx.nav_id = tisci_rm->tisci_dev_id;
1613 req_rx.index = rchan->id;
1614 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1615 req_rx.rxcq_qnum = tc_ring;
1616 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1618 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1620 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1625 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1627 struct udma_dev *ud = uc->ud;
1628 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1629 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1630 struct udma_tchan *tchan = uc->tchan;
1631 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1632 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1633 u32 mode, fetch_size;
1636 if (uc->config.pkt_mode) {
1637 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1638 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1639 uc->config.psd_size, 0);
1641 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1642 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1645 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1646 req_tx.nav_id = tisci_rm->tisci_dev_id;
1647 req_tx.index = tchan->id;
1648 req_tx.tx_chan_type = mode;
1649 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1650 req_tx.tx_fetch_size = fetch_size >> 2;
1651 req_tx.txcq_qnum = tc_ring;
1653 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1655 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1660 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1662 struct udma_dev *ud = uc->ud;
1663 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1664 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1665 struct udma_rchan *rchan = uc->rchan;
1666 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1667 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1668 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1669 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1670 u32 mode, fetch_size;
1673 if (uc->config.pkt_mode) {
1674 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1675 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1676 uc->config.psd_size, 0);
1678 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1679 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1682 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1683 req_rx.nav_id = tisci_rm->tisci_dev_id;
1684 req_rx.index = rchan->id;
1685 req_rx.rx_fetch_size = fetch_size >> 2;
1686 req_rx.rxcq_qnum = rx_ring;
1687 req_rx.rx_chan_type = mode;
1689 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1691 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
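/*
 * Configure the default flow (flow id == rchan id): completed descriptors
 * are returned to the receive ring, free descriptors are taken from the
 * free descriptor ring.
 */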
1695 flow_req.valid_params =
1696 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1697 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1698 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1699 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1700 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1701 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1702 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1703 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1704 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1705 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1706 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1707 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1708 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1710 flow_req.nav_id = tisci_rm->tisci_dev_id;
1711 flow_req.flow_index = rchan->id;
1713 if (uc->config.needs_epib)
1714 flow_req.rx_einfo_present = 1;
1716 flow_req.rx_einfo_present = 0;
1717 if (uc->config.psd_size)
1718 flow_req.rx_psinfo_present = 1;
1720 flow_req.rx_psinfo_present = 0;
1721 flow_req.rx_error_handling = 1;
1722 flow_req.rx_dest_qnum = rx_ring;
1723 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
1724 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
1725 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
1726 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
1727 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1728 flow_req.rx_fdq1_qnum = fd_ring;
1729 flow_req.rx_fdq2_qnum = fd_ring;
1730 flow_req.rx_fdq3_qnum = fd_ring;
1732 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
1735 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1740 static int udma_alloc_chan_resources(struct dma_chan *chan)
1742 struct udma_chan *uc = to_udma_chan(chan);
1743 struct udma_dev *ud = to_udma_dev(chan->device);
1744 const struct udma_match_data *match_data = ud->match_data;
1745 struct k3_ring *irq_ring;
1749 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1750 uc->use_dma_pool = true;
1751 /* in case of MEM_TO_MEM we have maximum of two TRs */
1752 if (uc->config.dir == DMA_MEM_TO_MEM) {
1753 uc->config.hdesc_size = cppi5_trdesc_calc_size(
1754 sizeof(struct cppi5_tr_type15_t), 2);
1755 uc->config.pkt_mode = false;
1759 if (uc->use_dma_pool) {
1760 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1761 uc->config.hdesc_size,
1764 if (!uc->hdesc_pool) {
1765 dev_err(ud->ddev.dev,
1766 "Descriptor pool allocation failed\n");
1767 uc->use_dma_pool = false;
1773 * Make sure that the completion is in a known state:
1774 * No teardown, the channel is idle
1776 reinit_completion(&uc->teardown_completed);
1777 complete_all(&uc->teardown_completed);
1778 uc->state = UDMA_CHAN_IS_IDLE;
1780 switch (uc->config.dir) {
1781 case DMA_MEM_TO_MEM:
1782 /* Non synchronized - mem to mem type of transfer */
1783 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1786 ret = udma_get_chan_pair(uc);
1790 ret = udma_alloc_tx_resources(uc);
1794 ret = udma_alloc_rx_resources(uc);
1796 udma_free_tx_resources(uc);
1800 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1801 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1802 K3_PSIL_DST_THREAD_ID_OFFSET;
1804 irq_ring = uc->tchan->tc_ring;
1805 irq_udma_idx = uc->tchan->id;
1807 ret = udma_tisci_m2m_channel_config(uc);
1809 case DMA_MEM_TO_DEV:
1810 /* Slave transfer synchronized - mem to dev (TX) transfer */
1811 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1814 ret = udma_alloc_tx_resources(uc);
1816 uc->config.remote_thread_id = -1;
1820 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1821 uc->config.dst_thread = uc->config.remote_thread_id;
1822 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1824 irq_ring = uc->tchan->tc_ring;
1825 irq_udma_idx = uc->tchan->id;
1827 ret = udma_tisci_tx_channel_config(uc);
1829 case DMA_DEV_TO_MEM:
1830 /* Slave transfer synchronized - dev to mem (RX) transfer */
1831 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1834 ret = udma_alloc_rx_resources(uc);
1836 uc->config.remote_thread_id = -1;
1840 uc->config.src_thread = uc->config.remote_thread_id;
1841 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1842 K3_PSIL_DST_THREAD_ID_OFFSET;
1844 irq_ring = uc->rflow->r_ring;
1845 irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
1847 ret = udma_tisci_rx_channel_config(uc);
1850 /* Cannot happen */
1851 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1852 __func__, uc->id, uc->config.dir);
1856 /* check if the channel configuration was successful */
1860 if (udma_is_chan_running(uc)) {
1861 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1863 if (udma_is_chan_running(uc)) {
1864 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1870 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1872 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1873 uc->config.src_thread, uc->config.dst_thread);
1877 uc->psil_paired = true;
1879 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1880 if (uc->irq_num_ring <= 0) {
1881 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1882 k3_ringacc_get_ring_id(irq_ring));
1887 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1888 IRQF_TRIGGER_HIGH, uc->name, uc);
1890 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1894 /* Event from UDMA (TR events) only needed for slave TR mode channels */
1895 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1896 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1898 if (uc->irq_num_udma <= 0) {
1899 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1901 free_irq(uc->irq_num_ring, uc);
1906 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1909 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1911 free_irq(uc->irq_num_ring, uc);
1915 uc->irq_num_udma = 0;
1918 udma_reset_rings(uc);
1920 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
1921 udma_check_tx_completion);
1925 uc->irq_num_ring = 0;
1926 uc->irq_num_udma = 0;
1928 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1929 uc->psil_paired = false;
1931 udma_free_tx_resources(uc);
1932 udma_free_rx_resources(uc);
1934 udma_reset_uchan(uc);
1936 if (uc->use_dma_pool) {
1937 dma_pool_destroy(uc->hdesc_pool);
1938 uc->use_dma_pool = false;
1944 static int udma_slave_config(struct dma_chan *chan,
1945 struct dma_slave_config *cfg)
1947 struct udma_chan *uc = to_udma_chan(chan);
1949 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1954 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1955 size_t tr_size, int tr_count,
1956 enum dma_transfer_direction dir)
1958 struct udma_hwdesc *hwdesc;
1959 struct cppi5_desc_hdr_t *tr_desc;
1960 struct udma_desc *d;
1961 u32 reload_count = 0;
1971 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1975 /* We have only one descriptor containing multiple TRs */
1976 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1980 d->sglen = tr_count;
1982 d->hwdesc_count = 1;
1983 hwdesc = &d->hwdesc[0];
1985 /* Allocate memory for DMA ring descriptor */
1986 if (uc->use_dma_pool) {
1987 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1988 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1990 &hwdesc->cppi5_desc_paddr);
1992 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1994 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1995 uc->ud->desc_align);
1996 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1997 hwdesc->cppi5_desc_size,
1998 &hwdesc->cppi5_desc_paddr,
2002 if (!hwdesc->cppi5_desc_vaddr) {
2007 /* Start of the TR req records */
2008 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2009 /* Start address of the TR response array */
2010 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2012 tr_desc = hwdesc->cppi5_desc_vaddr;
2015 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2017 if (dir == DMA_DEV_TO_MEM)
2018 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2020 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2022 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2023 cppi5_desc_set_pktids(tr_desc, uc->id,
2024 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2025 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2031 * udma_get_tr_counters - calculate TR counters for a given length
2032 * @len: Length of the transfer
2033 * @align_to: Preferred alignment
2034 * @tr0_cnt0: First TR icnt0
2035 * @tr0_cnt1: First TR icnt1
2036 * @tr1_cnt0: Second (if used) TR icnt0
2038 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2039 * For len >= SZ_64K two TRs are used in a simple way:
2040 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2041 * Second TR: the remaining length (tr1_cnt0)
2043 * Returns the number of TRs the length needs (1 or 2)
2044 * -EINVAL if the length cannot be supported
2046 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2047 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
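/* The TR icnt fields are 16 bits wide, so lengths of SZ_64K or more are split into aligned blocks below 64K */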
2060 *tr0_cnt0 = SZ_64K - BIT(align_to);
2061 if (len / *tr0_cnt0 >= SZ_64K) {
2069 *tr0_cnt1 = len / *tr0_cnt0;
2070 *tr1_cnt0 = len % *tr0_cnt0;
2075 static struct udma_desc *
2076 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2077 unsigned int sglen, enum dma_transfer_direction dir,
2078 unsigned long tx_flags, void *context)
2080 struct scatterlist *sgent;
2081 struct udma_desc *d;
2082 struct cppi5_tr_type1_t *tr_req = NULL;
2083 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2089 if (!is_slave_direction(dir)) {
2090 dev_err(uc->ud->dev, "Only slave transfers are supported\n");
2094 /* estimate the number of TRs we will need */
2095 for_each_sg(sgl, sgent, sglen, i) {
2096 if (sg_dma_len(sgent) < SZ_64K)
2102 /* Now allocate and setup the descriptor. */
2103 tr_size = sizeof(struct cppi5_tr_type1_t);
2104 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2110 tr_req = d->hwdesc[0].tr_req_base;
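/* Build one type1 TR per SG entry, plus a second TR for entries of 64K or more */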
2111 for_each_sg(sgl, sgent, sglen, i) {
2112 dma_addr_t sg_addr = sg_dma_address(sgent);
2114 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2115 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2117 dev_err(uc->ud->dev, "size %u is not supported\n",
2119 udma_free_hwdesc(uc, d);
2124 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, false,
2125 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2126 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2128 tr_req[tr_idx].addr = sg_addr;
2129 tr_req[tr_idx].icnt0 = tr0_cnt0;
2130 tr_req[tr_idx].icnt1 = tr0_cnt1;
2131 tr_req[tr_idx].dim1 = tr0_cnt0;
2135 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2137 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2138 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2139 CPPI5_TR_CSF_SUPR_EVT);
2141 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2142 tr_req[tr_idx].icnt0 = tr1_cnt0;
2143 tr_req[tr_idx].icnt1 = 1;
2144 tr_req[tr_idx].dim1 = tr1_cnt0;
2148 d->residue += sg_dma_len(sgent);
2151 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
2156 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2157 enum dma_slave_buswidth dev_width,
2160 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2163 /* Bus width translates to the element size (ES) */
2164 switch (dev_width) {
2165 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2166 d->static_tr.elsize = 0;
2168 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2169 d->static_tr.elsize = 1;
2171 case DMA_SLAVE_BUSWIDTH_3_BYTES:
2172 d->static_tr.elsize = 2;
2174 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2175 d->static_tr.elsize = 3;
2177 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2178 d->static_tr.elsize = 4;
2180 default: /* not reached */
2184 d->static_tr.elcnt = elcnt;
2187 * PDMA must close the packet when the channel is in packet mode.
2188 * For TR mode, when the channel is not cyclic we also need PDMA to close
2189 * the packet, otherwise the transfer will stall because PDMA holds on to
2190 * the data it has received from the peripheral.
2192 if (uc->config.pkt_mode || !uc->cyclic) {
2193 unsigned int div = dev_width * elcnt;
2196 d->static_tr.bstcnt = d->residue / d->sglen / div;
2198 d->static_tr.bstcnt = d->residue / div;
2200 if (uc->config.dir == DMA_DEV_TO_MEM &&
2201 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2204 d->static_tr.bstcnt = 0;
2210 static struct udma_desc *
2211 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2212 unsigned int sglen, enum dma_transfer_direction dir,
2213 unsigned long tx_flags, void *context)
2215 struct scatterlist *sgent;
2216 struct cppi5_host_desc_t *h_desc = NULL;
2217 struct udma_desc *d;
2221 d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2226 d->hwdesc_count = sglen;
2228 if (dir == DMA_DEV_TO_MEM)
2229 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2231 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2233 for_each_sg(sgl, sgent, sglen, i) {
2234 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2235 dma_addr_t sg_addr = sg_dma_address(sgent);
2236 struct cppi5_host_desc_t *desc;
2237 size_t sg_len = sg_dma_len(sgent);
2239 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2241 &hwdesc->cppi5_desc_paddr);
2242 if (!hwdesc->cppi5_desc_vaddr) {
2243 dev_err(uc->ud->dev,
2244 "descriptor%d allocation failed\n", i);
2246 udma_free_hwdesc(uc, d);
2251 d->residue += sg_len;
2252 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2253 desc = hwdesc->cppi5_desc_vaddr;
2256 cppi5_hdesc_init(desc, 0, 0);
2257 /* Flow and Packet ID */
2258 cppi5_desc_set_pktids(&desc->hdr, uc->id,
2259 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2260 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2262 cppi5_hdesc_reset_hbdesc(desc);
2263 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2266 /* attach the sg buffer to the descriptor */
2267 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2269 /* Attach link as host buffer descriptor */
2271 cppi5_hdesc_link_hbdesc(h_desc,
2272 hwdesc->cppi5_desc_paddr);
2274 if (dir == DMA_MEM_TO_DEV)
2278 if (d->residue >= SZ_4M) {
2279 dev_err(uc->ud->dev,
2280 "%s: Transfer size %u is over the supported 4M range\n",
2281 __func__, d->residue);
2282 udma_free_hwdesc(uc, d);
2287 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2288 cppi5_hdesc_set_pktlen(h_desc, d->residue);
2293 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2294 void *data, size_t len)
2296 struct udma_desc *d = to_udma_desc(desc);
2297 struct udma_chan *uc = to_udma_chan(desc->chan);
2298 struct cppi5_host_desc_t *h_desc;
2302 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2305 if (!data || len > uc->config.metadata_size)
2308 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2311 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2312 if (d->dir == DMA_MEM_TO_DEV)
2313 memcpy(h_desc->epib, data, len);
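/* The EPIB (when used) takes the first 16 bytes of the metadata; the remainder is PS data */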
2315 if (uc->config.needs_epib)
2316 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2319 d->metadata_size = len;
2320 if (uc->config.needs_epib)
2321 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2323 cppi5_hdesc_update_flags(h_desc, flags);
2324 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2329 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2330 size_t *payload_len, size_t *max_len)
2332 struct udma_desc *d = to_udma_desc(desc);
2333 struct udma_chan *uc = to_udma_chan(desc->chan);
2334 struct cppi5_host_desc_t *h_desc;
2336 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2337 return ERR_PTR(-ENOTSUPP);
2339 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2341 *max_len = uc->config.metadata_size;
2343 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2344 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2345 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2347 return h_desc->epib;
2350 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2353 struct udma_desc *d = to_udma_desc(desc);
2354 struct udma_chan *uc = to_udma_chan(desc->chan);
2355 struct cppi5_host_desc_t *h_desc;
2356 u32 psd_size = payload_len;
2359 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2362 if (payload_len > uc->config.metadata_size)
2365 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2368 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2370 if (uc->config.needs_epib) {
2371 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2372 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2375 cppi5_hdesc_update_flags(h_desc, flags);
2376 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
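/* dmaengine per-descriptor metadata ops: clients attach or read back the EPIB/PS data through these callbacks */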
2381 static struct dma_descriptor_metadata_ops metadata_ops = {
2382 .attach = udma_attach_metadata,
2383 .get_ptr = udma_get_metadata_ptr,
2384 .set_len = udma_set_metadata_len,
2387 static struct dma_async_tx_descriptor *
2388 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2389 unsigned int sglen, enum dma_transfer_direction dir,
2390 unsigned long tx_flags, void *context)
2392 struct udma_chan *uc = to_udma_chan(chan);
2393 enum dma_slave_buswidth dev_width;
2394 struct udma_desc *d;
2397 if (dir != uc->config.dir) {
2398 dev_err(chan->device->dev,
2399 "%s: chan%d is for %s, not supporting %s\n",
2401 dmaengine_get_direction_text(uc->config.dir),
2402 dmaengine_get_direction_text(dir));
2406 if (dir == DMA_DEV_TO_MEM) {
2407 dev_width = uc->cfg.src_addr_width;
2408 burst = uc->cfg.src_maxburst;
2409 } else if (dir == DMA_MEM_TO_DEV) {
2410 dev_width = uc->cfg.dst_addr_width;
2411 burst = uc->cfg.dst_maxburst;
2413 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2420 if (uc->config.pkt_mode)
2421 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2424 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2434 /* static TR for remote PDMA */
2435 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2436 dev_err(uc->ud->dev,
2437 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2438 __func__, d->static_tr.bstcnt);
2440 udma_free_hwdesc(uc, d);
2445 if (uc->config.metadata_size)
2446 d->vd.tx.metadata_ops = &metadata_ops;
2448 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2451 static struct udma_desc *
2452 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2453 size_t buf_len, size_t period_len,
2454 enum dma_transfer_direction dir, unsigned long flags)
2456 struct udma_desc *d;
2457 size_t tr_size, period_addr;
2458 struct cppi5_tr_type1_t *tr_req;
2459 unsigned int periods = buf_len / period_len;
2460 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2464 if (!is_slave_direction(dir)) {
2465 dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
2469 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
2470 &tr0_cnt1, &tr1_cnt0);
2472 dev_err(uc->ud->dev, "size %zu is not supported\n",
2477 /* Now allocate and setup the descriptor. */
2478 tr_size = sizeof(struct cppi5_tr_type1_t);
2479 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
2483 tr_req = d->hwdesc[0].tr_req_base;
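/*
 * Set up one type1 TR per period (two for periods of 64K or more); the
 * completion event is suppressed on the first TR of a split period and,
 * when DMA_PREP_INTERRUPT is not requested, on the last TR as well.
 */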
2484 period_addr = buf_addr;
2485 for (i = 0; i < periods; i++) {
2486 int tr_idx = i * num_tr;
2488 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2489 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2491 tr_req[tr_idx].addr = period_addr;
2492 tr_req[tr_idx].icnt0 = tr0_cnt0;
2493 tr_req[tr_idx].icnt1 = tr0_cnt1;
2494 tr_req[tr_idx].dim1 = tr0_cnt0;
2497 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2498 CPPI5_TR_CSF_SUPR_EVT);
2501 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2503 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2505 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
2506 tr_req[tr_idx].icnt0 = tr1_cnt0;
2507 tr_req[tr_idx].icnt1 = 1;
2508 tr_req[tr_idx].dim1 = tr1_cnt0;
2511 if (!(flags & DMA_PREP_INTERRUPT))
2512 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2513 CPPI5_TR_CSF_SUPR_EVT);
2515 period_addr += period_len;
2521 static struct udma_desc *
2522 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2523 size_t buf_len, size_t period_len,
2524 enum dma_transfer_direction dir, unsigned long flags)
2526 struct udma_desc *d;
2529 int periods = buf_len / period_len;
2531 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2534 if (period_len >= SZ_4M)
2537 d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2541 d->hwdesc_count = periods;
2543 /* TODO: re-check this... */
2544 if (dir == DMA_DEV_TO_MEM)
2545 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2547 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2549 for (i = 0; i < periods; i++) {
2550 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2551 dma_addr_t period_addr = buf_addr + (period_len * i);
2552 struct cppi5_host_desc_t *h_desc;
2554 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2556 &hwdesc->cppi5_desc_paddr);
2557 if (!hwdesc->cppi5_desc_vaddr) {
2558 dev_err(uc->ud->dev,
2559 "descriptor%d allocation failed\n", i);
2561 udma_free_hwdesc(uc, d);
2566 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2567 h_desc = hwdesc->cppi5_desc_vaddr;
2569 cppi5_hdesc_init(h_desc, 0, 0);
2570 cppi5_hdesc_set_pktlen(h_desc, period_len);
2572 /* Flow and Packet ID */
2573 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
2574 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2575 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2577 /* attach each period to a new descriptor */
2578 cppi5_hdesc_attach_buf(h_desc,
2579 period_addr, period_len,
2580 period_addr, period_len);
2586 static struct dma_async_tx_descriptor *
2587 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2588 size_t period_len, enum dma_transfer_direction dir,
2589 unsigned long flags)
2591 struct udma_chan *uc = to_udma_chan(chan);
2592 enum dma_slave_buswidth dev_width;
2593 struct udma_desc *d;
2596 if (dir != uc->config.dir) {
2597 dev_err(chan->device->dev,
2598 "%s: chan%d is for %s, not supporting %s\n",
2600 dmaengine_get_direction_text(uc->config.dir),
2601 dmaengine_get_direction_text(dir));
2607 if (dir == DMA_DEV_TO_MEM) {
2608 dev_width = uc->cfg.src_addr_width;
2609 burst = uc->cfg.src_maxburst;
2610 } else if (dir == DMA_MEM_TO_DEV) {
2611 dev_width = uc->cfg.dst_addr_width;
2612 burst = uc->cfg.dst_maxburst;
2614 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2621 if (uc->config.pkt_mode)
2622 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2625 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2631 d->sglen = buf_len / period_len;
2634 d->residue = buf_len;
2636 /* static TR for remote PDMA */
2637 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2638 dev_err(uc->ud->dev,
2639 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2640 __func__, d->static_tr.bstcnt);
2642 udma_free_hwdesc(uc, d);
2647 if (uc->config.metadata_size)
2648 d->vd.tx.metadata_ops = &metadata_ops;
2650 return vchan_tx_prep(&uc->vc, &d->vd, flags);
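/*
 * MEM_TO_MEM uses a pair of Type-15 TRs: TR0 moves tr0_cnt0 * tr0_cnt1 bytes
 * and, when needed, TR1 moves the remainder. Events are suppressed on both
 * TRs and only the last one carries the EOP flag.
 */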
2653 static struct dma_async_tx_descriptor *
2654 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2655 size_t len, unsigned long tx_flags)
2657 struct udma_chan *uc = to_udma_chan(chan);
2658 struct udma_desc *d;
2659 struct cppi5_tr_type15_t *tr_req;
2661 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2662 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2664 if (uc->config.dir != DMA_MEM_TO_MEM) {
2665 dev_err(chan->device->dev,
2666 "%s: chan%d is for %s, not supporting %s\n",
2668 dmaengine_get_direction_text(uc->config.dir),
2669 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2673 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
2674 &tr0_cnt1, &tr1_cnt0);
2676 dev_err(uc->ud->dev, "size %zu is not supported\n",
2681 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2685 d->dir = DMA_MEM_TO_MEM;
2690 tr_req = d->hwdesc[0].tr_req_base;
2692 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2693 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2694 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2696 tr_req[0].addr = src;
2697 tr_req[0].icnt0 = tr0_cnt0;
2698 tr_req[0].icnt1 = tr0_cnt1;
2699 tr_req[0].icnt2 = 1;
2700 tr_req[0].icnt3 = 1;
2701 tr_req[0].dim1 = tr0_cnt0;
2703 tr_req[0].daddr = dest;
2704 tr_req[0].dicnt0 = tr0_cnt0;
2705 tr_req[0].dicnt1 = tr0_cnt1;
2706 tr_req[0].dicnt2 = 1;
2707 tr_req[0].dicnt3 = 1;
2708 tr_req[0].ddim1 = tr0_cnt0;
2711 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2712 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2713 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2715 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2716 tr_req[1].icnt0 = tr1_cnt0;
2717 tr_req[1].icnt1 = 1;
2718 tr_req[1].icnt2 = 1;
2719 tr_req[1].icnt3 = 1;
2721 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2722 tr_req[1].dicnt0 = tr1_cnt0;
2723 tr_req[1].dicnt1 = 1;
2724 tr_req[1].dicnt2 = 1;
2725 tr_req[1].dicnt3 = 1;
2728 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
2730 if (uc->config.metadata_size)
2731 d->vd.tx.metadata_ops = &metadata_ops;
2733 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
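/*
 * issue_pending only starts a new descriptor if the channel is not in the
 * middle of a teardown (marked terminating while the hardware is still
 * running); otherwise the descriptor stays queued on the virt-dma channel.
 */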
2736 static void udma_issue_pending(struct dma_chan *chan)
2738 struct udma_chan *uc = to_udma_chan(chan);
2739 unsigned long flags;
2741 spin_lock_irqsave(&uc->vc.lock, flags);
2743 /* If we have something pending and no active descriptor, then */
2744 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2745 /*
2746 * start a descriptor if the channel is NOT [marked as
2747 * terminating _and_ it is still running (teardown has not
2748 * completed yet)].
2749 */
2750 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2751 udma_is_chan_running(uc)))
2755 spin_unlock_irqrestore(&uc->vc.lock, flags);
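/*
 * Residue for the currently active cookie is derived from the channel
 * realtime byte counters (SBCNT/BCNT plus the PDMA peer counter for
 * non-native endpoints); the source/peer difference is reported as
 * in-flight bytes.
 */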
2758 static enum dma_status udma_tx_status(struct dma_chan *chan,
2759 dma_cookie_t cookie,
2760 struct dma_tx_state *txstate)
2762 struct udma_chan *uc = to_udma_chan(chan);
2763 enum dma_status ret;
2764 unsigned long flags;
2766 spin_lock_irqsave(&uc->vc.lock, flags);
2768 ret = dma_cookie_status(chan, cookie, txstate);
2770 if (!udma_is_chan_running(uc))
2773 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2776 if (ret == DMA_COMPLETE || !txstate)
2779 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2782 u32 residue = uc->desc->residue;
2785 if (uc->desc->dir == DMA_MEM_TO_DEV) {
2786 bcnt = udma_tchanrt_read(uc->tchan,
2787 UDMA_TCHAN_RT_SBCNT_REG);
2789 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2790 peer_bcnt = udma_tchanrt_read(uc->tchan,
2791 UDMA_TCHAN_RT_PEER_BCNT_REG);
2793 if (bcnt > peer_bcnt)
2794 delay = bcnt - peer_bcnt;
2796 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
2797 bcnt = udma_rchanrt_read(uc->rchan,
2798 UDMA_RCHAN_RT_BCNT_REG);
2800 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2801 peer_bcnt = udma_rchanrt_read(uc->rchan,
2802 UDMA_RCHAN_RT_PEER_BCNT_REG);
2804 if (peer_bcnt > bcnt)
2805 delay = peer_bcnt - bcnt;
2808 bcnt = udma_tchanrt_read(uc->tchan,
2809 UDMA_TCHAN_RT_BCNT_REG);
2813 if (bcnt && !(bcnt % uc->desc->residue))
2814 residue = 0;
2815 else
2816 residue -= bcnt % uc->desc->residue;
2818 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2823 dma_set_residue(txstate, residue);
2824 dma_set_in_flight_bytes(txstate, delay);
2831 spin_unlock_irqrestore(&uc->vc.lock, flags);
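/*
 * Pause/resume toggle the PAUSE bit on the PSI-L peer (PDMA) for the slave
 * directions and in the channel realtime control register for MEM_TO_MEM.
 */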
2835 static int udma_pause(struct dma_chan *chan)
2837 struct udma_chan *uc = to_udma_chan(chan);
2839 /* pause the channel */
2840 switch (uc->config.dir) {
2841 case DMA_DEV_TO_MEM:
2842 udma_rchanrt_update_bits(uc->rchan,
2843 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2844 UDMA_PEER_RT_EN_PAUSE,
2845 UDMA_PEER_RT_EN_PAUSE);
2847 case DMA_MEM_TO_DEV:
2848 udma_tchanrt_update_bits(uc->tchan,
2849 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2850 UDMA_PEER_RT_EN_PAUSE,
2851 UDMA_PEER_RT_EN_PAUSE);
2853 case DMA_MEM_TO_MEM:
2854 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2855 UDMA_CHAN_RT_CTL_PAUSE,
2856 UDMA_CHAN_RT_CTL_PAUSE);
2865 static int udma_resume(struct dma_chan *chan)
2867 struct udma_chan *uc = to_udma_chan(chan);
2869 /* resume the channel */
2870 switch (uc->config.dir) {
2871 case DMA_DEV_TO_MEM:
2872 udma_rchanrt_update_bits(uc->rchan,
2873 UDMA_RCHAN_RT_PEER_RT_EN_REG,
2874 UDMA_PEER_RT_EN_PAUSE, 0);
2877 case DMA_MEM_TO_DEV:
2878 udma_tchanrt_update_bits(uc->tchan,
2879 UDMA_TCHAN_RT_PEER_RT_EN_REG,
2880 UDMA_PEER_RT_EN_PAUSE, 0);
2882 case DMA_MEM_TO_MEM:
2883 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2884 UDMA_CHAN_RT_CTL_PAUSE, 0);
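/*
 * terminate_all: stop the channel if it is still running, mark the in-flight
 * descriptor as terminated and free everything still queued on the virt-dma
 * channel.
 */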
2893 static int udma_terminate_all(struct dma_chan *chan)
2895 struct udma_chan *uc = to_udma_chan(chan);
2896 unsigned long flags;
2899 spin_lock_irqsave(&uc->vc.lock, flags);
2901 if (udma_is_chan_running(uc))
2905 uc->terminated_desc = uc->desc;
2907 uc->terminated_desc->terminated = true;
2908 cancel_delayed_work(&uc->tx_drain.work);
2913 vchan_get_all_descriptors(&uc->vc, &head);
2914 spin_unlock_irqrestore(&uc->vc.lock, flags);
2915 vchan_dma_desc_free_list(&uc->vc, &head);
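/*
 * synchronize waits up to one second for an in-progress teardown to finish,
 * then resets the channel state and rings so the channel can be reused.
 */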
2920 static void udma_synchronize(struct dma_chan *chan)
2922 struct udma_chan *uc = to_udma_chan(chan);
2923 unsigned long timeout = msecs_to_jiffies(1000);
2925 vchan_synchronize(&uc->vc);
2927 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2928 timeout = wait_for_completion_timeout(&uc->teardown_completed,
2931 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2933 udma_dump_chan_stdata(uc);
2934 udma_reset_chan(uc, true);
2938 udma_reset_chan(uc, false);
2939 if (udma_is_chan_running(uc))
2940 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2942 cancel_delayed_work_sync(&uc->tx_drain.work);
2943 udma_reset_rings(uc);
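/*
 * Before the client callback runs: fetch the EPIB metadata if the channel
 * uses it and fill in the dmaengine result from the completed host
 * descriptor; a non-zero residue is reported as DMA_TRANS_ABORTED.
 */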
2946 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2947 struct virt_dma_desc *vd,
2948 struct dmaengine_result *result)
2950 struct udma_chan *uc = to_udma_chan(&vc->chan);
2951 struct udma_desc *d;
2956 d = to_udma_desc(&vd->tx);
2958 if (d->metadata_size)
2959 udma_fetch_epib(uc, d);
2961 /* Provide residue information for the client */
2963 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2965 if (cppi5_desc_get_type(desc_vaddr) ==
2966 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2967 result->residue = d->residue -
2968 cppi5_hdesc_get_pktlen(desc_vaddr);
2969 if (result->residue)
2970 result->result = DMA_TRANS_ABORTED;
2972 result->result = DMA_TRANS_NOERROR;
2974 result->residue = 0;
2975 result->result = DMA_TRANS_NOERROR;
2980 /*
2981 * This tasklet handles the completion of a DMA descriptor by
2982 * calling its callback and freeing it.
2983 */
2984 static void udma_vchan_complete(unsigned long arg)
2986 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
2987 struct virt_dma_desc *vd, *_vd;
2988 struct dmaengine_desc_callback cb;
2991 spin_lock_irq(&vc->lock);
2992 list_splice_tail_init(&vc->desc_completed, &head);
2996 dmaengine_desc_get_callback(&vd->tx, &cb);
2998 memset(&cb, 0, sizeof(cb));
3000 spin_unlock_irq(&vc->lock);
3002 udma_desc_pre_callback(vc, vd, NULL);
3003 dmaengine_desc_callback_invoke(&cb, NULL);
3005 list_for_each_entry_safe(vd, _vd, &head, node) {
3006 struct dmaengine_result result;
3008 dmaengine_desc_get_callback(&vd->tx, &cb);
3010 list_del(&vd->node);
3012 udma_desc_pre_callback(vc, vd, &result);
3013 dmaengine_desc_callback_invoke(&cb, &result);
3015 vchan_vdesc_fini(vd);
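/*
 * Undo everything done at channel allocation: terminate pending work, free
 * the ring and UDMA interrupts, drop the PSI-L pairing, release the
 * tchan/rchan/rflow resources and destroy the per-channel descriptor pool.
 */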
3019 static void udma_free_chan_resources(struct dma_chan *chan)
3021 struct udma_chan *uc = to_udma_chan(chan);
3022 struct udma_dev *ud = to_udma_dev(chan->device);
3024 udma_terminate_all(chan);
3025 if (uc->terminated_desc) {
3026 udma_reset_chan(uc, false);
3027 udma_reset_rings(uc);
3030 cancel_delayed_work_sync(&uc->tx_drain.work);
3031 destroy_delayed_work_on_stack(&uc->tx_drain.work);
3033 if (uc->irq_num_ring > 0) {
3034 free_irq(uc->irq_num_ring, uc);
3036 uc->irq_num_ring = 0;
3038 if (uc->irq_num_udma > 0) {
3039 free_irq(uc->irq_num_udma, uc);
3041 uc->irq_num_udma = 0;
3044 /* Release PSI-L pairing */
3045 if (uc->psil_paired) {
3046 navss_psil_unpair(ud, uc->config.src_thread,
3047 uc->config.dst_thread);
3048 uc->psil_paired = false;
3051 vchan_free_chan_resources(&uc->vc);
3052 tasklet_kill(&uc->vc.task);
3054 udma_free_tx_resources(uc);
3055 udma_free_rx_resources(uc);
3056 udma_reset_uchan(uc);
3058 if (uc->use_dma_pool) {
3059 dma_pool_destroy(uc->hdesc_pool);
3060 uc->use_dma_pool = false;
3064 static struct platform_driver udma_driver;
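/*
 * The filter function translates the PSI-L remote thread ID from the DT dma
 * cell into a channel configuration: the destination-thread bit selects
 * MEM_TO_DEV vs DEV_TO_MEM, and the PSI-L endpoint config supplies packet
 * mode, EPIB/PS-data needs and the PDMA ACC32/burst capabilities.
 */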
3066 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
3068 struct udma_chan_config *ucc;
3069 struct psil_endpoint_config *ep_config;
3070 struct udma_chan *uc;
3071 struct udma_dev *ud;
3074 if (chan->device->dev->driver != &udma_driver.driver)
3077 uc = to_udma_chan(chan);
3082 ucc->remote_thread_id = args[0];
3084 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
3085 ucc->dir = DMA_MEM_TO_DEV;
3087 ucc->dir = DMA_DEV_TO_MEM;
3089 ep_config = psil_get_ep_config(ucc->remote_thread_id);
3090 if (IS_ERR(ep_config)) {
3091 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
3092 ucc->remote_thread_id);
3093 ucc->dir = DMA_MEM_TO_MEM;
3094 ucc->remote_thread_id = -1;
3098 ucc->pkt_mode = ep_config->pkt_mode;
3099 ucc->channel_tpl = ep_config->channel_tpl;
3100 ucc->notdpkt = ep_config->notdpkt;
3101 ucc->ep_type = ep_config->ep_type;
3103 if (ucc->ep_type != PSIL_EP_NATIVE) {
3104 const struct udma_match_data *match_data = ud->match_data;
3106 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
3107 ucc->enable_acc32 = ep_config->pdma_acc32;
3108 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
3109 ucc->enable_burst = ep_config->pdma_burst;
3112 ucc->needs_epib = ep_config->needs_epib;
3113 ucc->psd_size = ep_config->psd_size;
3114 ucc->metadata_size =
3115 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3119 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3120 ucc->metadata_size, ud->desc_align);
3122 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3123 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3128 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3129 struct of_dma *ofdma)
3131 struct udma_dev *ud = ofdma->of_dma_data;
3132 dma_cap_mask_t mask = ud->ddev.cap_mask;
3133 struct dma_chan *chan;
3135 if (dma_spec->args_count != 1)
3138 chan = __dma_request_channel(&mask, udma_dma_filter_fn,
3139 &dma_spec->args[0], ofdma->of_node);
3141 dev_err(ud->dev, "failed to get channel in %s\n", __func__);
3142 return ERR_PTR(-EINVAL);
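/*
 * Per-SoC match data: PSI-L thread base, whether MEM_TO_MEM channels are
 * exposed, the static TR Z counter width and the offset used when reserving
 * MSI events for receive channels.
 */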
3148 static struct udma_match_data am654_main_data = {
3149 .psil_base = 0x1000,
3150 .enable_memcpy_support = true,
3151 .statictr_z_mask = GENMASK(11, 0),
3152 .rchan_oes_offset = 0x2000,
3154 .level_start_idx = {
3155 [0] = 8, /* Normal channels */
3156 [1] = 0, /* High Throughput channels */
3160 static struct udma_match_data am654_mcu_data = {
3161 .psil_base = 0x6000,
3162 .enable_memcpy_support = true, /* TEST: DMA domains */
3163 .statictr_z_mask = GENMASK(11, 0),
3164 .rchan_oes_offset = 0x2000,
3166 .level_start_idx = {
3167 [0] = 2, /* Normal channels */
3168 [1] = 0, /* High Throughput channels */
3172 static struct udma_match_data j721e_main_data = {
3173 .psil_base = 0x1000,
3174 .enable_memcpy_support = true,
3175 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3176 .statictr_z_mask = GENMASK(23, 0),
3177 .rchan_oes_offset = 0x400,
3179 .level_start_idx = {
3180 [0] = 16, /* Normal channels */
3181 [1] = 4, /* High Throughput channels */
3182 [2] = 0, /* Ultra High Throughput channels */
3186 static struct udma_match_data j721e_mcu_data = {
3187 .psil_base = 0x6000,
3188 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
3189 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3190 .statictr_z_mask = GENMASK(23, 0),
3191 .rchan_oes_offset = 0x400,
3193 .level_start_idx = {
3194 [0] = 2, /* Normal channels */
3195 [1] = 0, /* High Throughput channels */
3199 static const struct of_device_id udma_of_match[] = {
3201 .compatible = "ti,am654-navss-main-udmap",
3202 .data = &am654_main_data,
3205 .compatible = "ti,am654-navss-mcu-udmap",
3206 .data = &am654_mcu_data,
3208 .compatible = "ti,j721e-navss-main-udmap",
3209 .data = &j721e_main_data,
3211 .compatible = "ti,j721e-navss-mcu-udmap",
3212 .data = &j721e_mcu_data,
3217 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3219 struct resource *res;
3222 for (i = 0; i < MMR_LAST; i++) {
3223 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3225 ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
3226 if (IS_ERR(ud->mmrs[i]))
3227 return PTR_ERR(ud->mmrs[i]);
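/*
 * Channel and flow counts come from the GCFG capability registers; the
 * tchan/rchan/rflow bitmaps are then seeded from the ti,sci-rm-range-*
 * properties so that only the ranges assigned to this host are allocatable
 * (a set bit marks a resource Linux must not touch).
 */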
3233 static int udma_setup_resources(struct udma_dev *ud)
3235 struct device *dev = ud->dev;
3236 int ch_count, ret, i, j;
3238 struct ti_sci_resource_desc *rm_desc;
3239 struct ti_sci_resource *rm_res, irq_res;
3240 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3241 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3242 "ti,sci-rm-range-rchan",
3243 "ti,sci-rm-range-rflow" };
3245 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
3246 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
3248 ud->rflow_cnt = cap3 & 0x3fff;
3249 ud->tchan_cnt = cap2 & 0x1ff;
3250 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
3251 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
3252 ch_count = ud->tchan_cnt + ud->rchan_cnt;
3254 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3255 sizeof(unsigned long), GFP_KERNEL);
3256 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3258 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3259 sizeof(unsigned long), GFP_KERNEL);
3260 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3262 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3263 sizeof(unsigned long),
3265 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3266 BITS_TO_LONGS(ud->rflow_cnt),
3267 sizeof(unsigned long),
3269 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3270 sizeof(unsigned long),
3272 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3275 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3276 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3277 !ud->rflows || !ud->rflow_in_use)
3278 return -ENOMEM;
3280 /*
3281 * RX flows with the same IDs as RX channels are reserved to be used
3282 * as default flows if remote HW can't generate flow_ids. Those
3283 * RX flows can be requested only explicitly by id.
3284 */
3285 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3287 /* by default no GP rflows are assigned to Linux */
3288 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3290 /* Get resource ranges from tisci */
3291 for (i = 0; i < RM_RANGE_LAST; i++)
3292 tisci_rm->rm_ranges[i] =
3293 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3294 tisci_rm->tisci_dev_id,
3295 (char *)range_names[i]);
3298 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3299 if (IS_ERR(rm_res)) {
3300 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3302 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3303 for (i = 0; i < rm_res->sets; i++) {
3304 rm_desc = &rm_res->desc[i];
3305 bitmap_clear(ud->tchan_map, rm_desc->start,
3307 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3308 rm_desc->start, rm_desc->num);
3311 irq_res.sets = rm_res->sets;
3313 /* rchan and matching default flow ranges */
3314 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3315 if (IS_ERR(rm_res)) {
3316 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3318 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3319 for (i = 0; i < rm_res->sets; i++) {
3320 rm_desc = &rm_res->desc[i];
3321 bitmap_clear(ud->rchan_map, rm_desc->start,
3323 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3324 rm_desc->start, rm_desc->num);
3328 irq_res.sets += rm_res->sets;
3329 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3330 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3331 for (i = 0; i < rm_res->sets; i++) {
3332 irq_res.desc[i].start = rm_res->desc[i].start;
3333 irq_res.desc[i].num = rm_res->desc[i].num;
3335 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3336 for (j = 0; j < rm_res->sets; j++, i++) {
3337 irq_res.desc[i].start = rm_res->desc[j].start +
3338 ud->match_data->rchan_oes_offset;
3339 irq_res.desc[i].num = rm_res->desc[j].num;
3341 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3342 kfree(irq_res.desc);
3344 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3348 /* GP rflow ranges */
3349 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3350 if (IS_ERR(rm_res)) {
3351 /* all gp flows are assigned exclusively to Linux */
3352 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3353 ud->rflow_cnt - ud->rchan_cnt);
3355 for (i = 0; i < rm_res->sets; i++) {
3356 rm_desc = &rm_res->desc[i];
3357 bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3359 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3360 rm_desc->start, rm_desc->num);
3364 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3365 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3369 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3374 dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3376 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3377 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3378 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3384 static int udma_setup_rx_flush(struct udma_dev *ud)
3386 struct udma_rx_flush *rx_flush = &ud->rx_flush;
3387 struct cppi5_desc_hdr_t *tr_desc;
3388 struct cppi5_tr_type1_t *tr_req;
3389 struct cppi5_host_desc_t *desc;
3390 struct device *dev = ud->dev;
3391 struct udma_hwdesc *hwdesc;
3394 /* Allocate 1K buffer for discarded data on RX channel teardown */
3395 rx_flush->buffer_size = SZ_1K;
3396 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
3398 if (!rx_flush->buffer_vaddr)
3401 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
3402 rx_flush->buffer_size,
3404 if (dma_mapping_error(dev, rx_flush->buffer_paddr))
3407 /* Set up descriptor to be used for TR mode */
3408 hwdesc = &rx_flush->hwdescs[0];
3409 tr_size = sizeof(struct cppi5_tr_type1_t);
3410 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
3411 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
3414 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3416 if (!hwdesc->cppi5_desc_vaddr)
3419 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3420 hwdesc->cppi5_desc_size,
3422 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3425 /* Start of the TR req records */
3426 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
3427 /* Start address of the TR response array */
3428 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
3430 tr_desc = hwdesc->cppi5_desc_vaddr;
3431 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
3432 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3433 cppi5_desc_set_retpolicy(tr_desc, 0, 0);
3435 tr_req = hwdesc->tr_req_base;
3436 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
3437 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3438 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
3440 tr_req->addr = rx_flush->buffer_paddr;
3441 tr_req->icnt0 = rx_flush->buffer_size;
3444 /* Set up descriptor to be used for packet mode */
3445 hwdesc = &rx_flush->hwdescs[1];
3446 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3447 CPPI5_INFO0_HDESC_EPIB_SIZE +
3448 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
3451 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3453 if (!hwdesc->cppi5_desc_vaddr)
3456 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3457 hwdesc->cppi5_desc_size,
3459 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3462 desc = hwdesc->cppi5_desc_vaddr;
3463 cppi5_hdesc_init(desc, 0, 0);
3464 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3465 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
3467 cppi5_hdesc_attach_buf(desc,
3468 rx_flush->buffer_paddr, rx_flush->buffer_size,
3469 rx_flush->buffer_paddr, rx_flush->buffer_size);
3471 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3472 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3476 #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3477 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3478 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3479 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3480 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
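/*
 * Probe: map the register regions, look up the ti-sci and ringacc handles
 * and the INTA MSI domain, fill in the dmaengine capabilities, partition
 * channels/flows via udma_setup_resources(), prepare the RX flush
 * descriptors and finally register the DMA device and the OF translator.
 */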
3482 static int udma_probe(struct platform_device *pdev)
3484 struct device_node *navss_node = pdev->dev.parent->of_node;
3485 struct device *dev = &pdev->dev;
3486 struct udma_dev *ud;
3487 const struct of_device_id *match;
3491 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
3493 dev_err(dev, "failed to set DMA mask\n");
3495 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3499 ret = udma_get_mmrs(pdev, ud);
3503 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3504 if (IS_ERR(ud->tisci_rm.tisci))
3505 return PTR_ERR(ud->tisci_rm.tisci);
3507 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
3508 &ud->tisci_rm.tisci_dev_id);
3510 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
3513 pdev->id = ud->tisci_rm.tisci_dev_id;
3515 ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
3516 &ud->tisci_rm.tisci_navss_dev_id);
3518 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
3522 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3523 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3525 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3526 if (IS_ERR(ud->ringacc))
3527 return PTR_ERR(ud->ringacc);
3529 dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
3530 DOMAIN_BUS_TI_SCI_INTA_MSI);
3531 if (!dev->msi_domain) {
3532 dev_err(dev, "Failed to get MSI domain\n");
3533 return -EPROBE_DEFER;
3536 match = of_match_node(udma_of_match, dev->of_node);
3538 dev_err(dev, "No compatible match found\n");
3541 ud->match_data = match->data;
3543 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
3544 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
3546 ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
3547 ud->ddev.device_config = udma_slave_config;
3548 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
3549 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
3550 ud->ddev.device_issue_pending = udma_issue_pending;
3551 ud->ddev.device_tx_status = udma_tx_status;
3552 ud->ddev.device_pause = udma_pause;
3553 ud->ddev.device_resume = udma_resume;
3554 ud->ddev.device_terminate_all = udma_terminate_all;
3555 ud->ddev.device_synchronize = udma_synchronize;
3557 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
3558 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
3559 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
3560 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3561 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3562 ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
3563 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
3564 DESC_METADATA_ENGINE;
3565 if (ud->match_data->enable_memcpy_support) {
3566 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
3567 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
3568 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
3573 ud->psil_base = ud->match_data->psil_base;
3575 INIT_LIST_HEAD(&ud->ddev.channels);
3576 INIT_LIST_HEAD(&ud->desc_to_purge);
3578 ch_count = udma_setup_resources(ud);
3582 spin_lock_init(&ud->lock);
3583 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
3585 ud->desc_align = 64;
3586 if (ud->desc_align < dma_get_cache_alignment())
3587 ud->desc_align = dma_get_cache_alignment();
3589 ret = udma_setup_rx_flush(ud);
3593 for (i = 0; i < ud->tchan_cnt; i++) {
3594 struct udma_tchan *tchan = &ud->tchans[i];
3597 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
3600 for (i = 0; i < ud->rchan_cnt; i++) {
3601 struct udma_rchan *rchan = &ud->rchans[i];
3604 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
3607 for (i = 0; i < ud->rflow_cnt; i++) {
3608 struct udma_rflow *rflow = &ud->rflows[i];
3613 for (i = 0; i < ch_count; i++) {
3614 struct udma_chan *uc = &ud->channels[i];
3617 uc->vc.desc_free = udma_desc_free;
3621 uc->config.remote_thread_id = -1;
3622 uc->config.dir = DMA_MEM_TO_MEM;
3623 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
3626 vchan_init(&uc->vc, &ud->ddev);
3627 /* Use custom vchan completion handling */
3628 tasklet_init(&uc->vc.task, udma_vchan_complete,
3629 (unsigned long)&uc->vc);
3630 init_completion(&uc->teardown_completed);
3633 ret = dma_async_device_register(&ud->ddev);
3635 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
3639 platform_set_drvdata(pdev, ud);
3641 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
3643 dev_err(dev, "failed to register of_dma controller\n");
3644 dma_async_device_unregister(&ud->ddev);
3650 static struct platform_driver udma_driver = {
3653 .of_match_table = udma_of_match,
3654 .suppress_bind_attrs = true,
3656 .probe = udma_probe,
3658 builtin_platform_driver(udma_driver);
3660 /* Private interfaces to UDMA */
3661 #include "k3-udma-private.c"