drivers/dma/ti/k3-udma.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4  *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5  */
6
7 #include <linux/kernel.h>
8 #include <linux/delay.h>
9 #include <linux/dmaengine.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/list.h>
16 #include <linux/platform_device.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/of.h>
20 #include <linux/of_dma.h>
21 #include <linux/of_device.h>
22 #include <linux/of_irq.h>
23 #include <linux/workqueue.h>
24 #include <linux/completion.h>
25 #include <linux/soc/ti/k3-ringacc.h>
26 #include <linux/soc/ti/ti_sci_protocol.h>
27 #include <linux/soc/ti/ti_sci_inta_msi.h>
28 #include <linux/dma/ti-cppi5.h>
29
30 #include "../virt-dma.h"
31 #include "k3-udma.h"
32 #include "k3-psil-priv.h"
33
34 struct udma_static_tr {
35         u8 elsize; /* RPSTR0 */
36         u16 elcnt; /* RPSTR0 */
37         u16 bstcnt; /* RPSTR1 */
38 };
39
40 #define K3_UDMA_MAX_RFLOWS              1024
41 #define K3_UDMA_DEFAULT_RING_SIZE       16
42
43 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
44 #define UDMA_RFLOW_SRCTAG_NONE          0
45 #define UDMA_RFLOW_SRCTAG_CFG_TAG       1
46 #define UDMA_RFLOW_SRCTAG_FLOW_ID       2
47 #define UDMA_RFLOW_SRCTAG_SRC_TAG       4
48
49 #define UDMA_RFLOW_DSTTAG_NONE          0
50 #define UDMA_RFLOW_DSTTAG_CFG_TAG       1
51 #define UDMA_RFLOW_DSTTAG_FLOW_ID       2
52 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO    4
53 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI    5
54
55 struct udma_chan;
56
57 enum udma_mmr {
58         MMR_GCFG = 0,
59         MMR_RCHANRT,
60         MMR_TCHANRT,
61         MMR_LAST,
62 };
63
64 static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
65
66 struct udma_tchan {
67         void __iomem *reg_rt;
68
69         int id;
70         struct k3_ring *t_ring; /* Transmit ring */
71         struct k3_ring *tc_ring; /* Transmit Completion ring */
72 };
73
74 struct udma_rflow {
75         int id;
76         struct k3_ring *fd_ring; /* Free Descriptor ring */
77         struct k3_ring *r_ring; /* Receive ring */
78 };
79
80 struct udma_rchan {
81         void __iomem *reg_rt;
82
83         int id;
84 };
85
86 #define UDMA_FLAG_PDMA_ACC32            BIT(0)
87 #define UDMA_FLAG_PDMA_BURST            BIT(1)
88
89 struct udma_match_data {
90         u32 psil_base;
91         bool enable_memcpy_support;
92         u32 flags;
93         u32 statictr_z_mask;
94         u32 rchan_oes_offset;
95
96         u8 tpl_levels;
97         u32 level_start_idx[];
98 };
99
100 struct udma_hwdesc {
101         size_t cppi5_desc_size;
102         void *cppi5_desc_vaddr;
103         dma_addr_t cppi5_desc_paddr;
104
105         /* TR descriptor internal pointers */
106         void *tr_req_base;
107         struct cppi5_tr_resp_t *tr_resp_base;
108 };
109
110 struct udma_rx_flush {
111         struct udma_hwdesc hwdescs[2];
112
113         size_t buffer_size;
114         void *buffer_vaddr;
115         dma_addr_t buffer_paddr;
116 };
117
118 struct udma_dev {
119         struct dma_device ddev;
120         struct device *dev;
121         void __iomem *mmrs[MMR_LAST];
122         const struct udma_match_data *match_data;
123
124         size_t desc_align; /* alignment to use for descriptors */
125
126         struct udma_tisci_rm tisci_rm;
127
128         struct k3_ringacc *ringacc;
129
130         struct work_struct purge_work;
131         struct list_head desc_to_purge;
132         spinlock_t lock;
133
134         struct udma_rx_flush rx_flush;
135
136         int tchan_cnt;
137         int echan_cnt;
138         int rchan_cnt;
139         int rflow_cnt;
140         unsigned long *tchan_map;
141         unsigned long *rchan_map;
142         unsigned long *rflow_gp_map;
143         unsigned long *rflow_gp_map_allocated;
144         unsigned long *rflow_in_use;
145
146         struct udma_tchan *tchans;
147         struct udma_rchan *rchans;
148         struct udma_rflow *rflows;
149
150         struct udma_chan *channels;
151         u32 psil_base;
152 };
153
154 struct udma_desc {
155         struct virt_dma_desc vd;
156
157         bool terminated;
158
159         enum dma_transfer_direction dir;
160
161         struct udma_static_tr static_tr;
162         u32 residue;
163
164         unsigned int sglen;
165         unsigned int desc_idx; /* Only used for cyclic in packet mode */
166         unsigned int tr_idx;
167
168         u32 metadata_size;
169         void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
170
171         unsigned int hwdesc_count;
172         struct udma_hwdesc hwdesc[];
173 };
174
175 enum udma_chan_state {
176         UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
177         UDMA_CHAN_IS_ACTIVE, /* Normal operation */
178         UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
179 };
180
181 struct udma_tx_drain {
182         struct delayed_work work;
183         ktime_t tstamp;
184         u32 residue;
185 };
186
187 struct udma_chan_config {
188         bool pkt_mode; /* TR or packet */
189         bool needs_epib; /* EPIB is needed for the communication or not */
190         u32 psd_size; /* size of Protocol Specific Data */
191         u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
192         u32 hdesc_size; /* Size of a packet descriptor in packet mode */
193         bool notdpkt; /* Suppress sending TDC packet */
194         int remote_thread_id;
195         u32 src_thread;
196         u32 dst_thread;
197         enum psil_endpoint_type ep_type;
198         bool enable_acc32;
199         bool enable_burst;
200         enum udma_tp_level channel_tpl; /* Channel Throughput Level */
201
202         enum dma_transfer_direction dir;
203 };
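/*
 * Worked example for the sizes above (illustrative): with needs_epib = true
 * and psd_size = 16, metadata_size is 16 + 16 = 32 bytes, i.e. the 16 byte
 * EPIB block plus 16 bytes of Protocol Specific Data carried in the CPPI5
 * host descriptor.
 */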
204
205 struct udma_chan {
206         struct virt_dma_chan vc;
207         struct dma_slave_config cfg;
208         struct udma_dev *ud;
209         struct udma_desc *desc;
210         struct udma_desc *terminated_desc;
211         struct udma_static_tr static_tr;
212         char *name;
213
214         struct udma_tchan *tchan;
215         struct udma_rchan *rchan;
216         struct udma_rflow *rflow;
217
218         bool psil_paired;
219
220         int irq_num_ring;
221         int irq_num_udma;
222
223         bool cyclic;
224         bool paused;
225
226         enum udma_chan_state state;
227         struct completion teardown_completed;
228
229         struct udma_tx_drain tx_drain;
230
231         u32 bcnt; /* number of bytes completed since the start of the channel */
232         u32 in_ring_cnt; /* number of descriptors in flight */
233
234         /* Channel configuration parameters */
235         struct udma_chan_config config;
236
237         /* dmapool for packet mode descriptors */
238         bool use_dma_pool;
239         struct dma_pool *hdesc_pool;
240
241         u32 id;
242 };
243
244 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
245 {
246         return container_of(d, struct udma_dev, ddev);
247 }
248
249 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
250 {
251         return container_of(c, struct udma_chan, vc.chan);
252 }
253
254 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
255 {
256         return container_of(t, struct udma_desc, vd.tx);
257 }
258
259 /* Generic register access functions */
260 static inline u32 udma_read(void __iomem *base, int reg)
261 {
262         return readl(base + reg);
263 }
264
265 static inline void udma_write(void __iomem *base, int reg, u32 val)
266 {
267         writel(val, base + reg);
268 }
269
270 static inline void udma_update_bits(void __iomem *base, int reg,
271                                     u32 mask, u32 val)
272 {
273         u32 tmp, orig;
274
275         orig = readl(base + reg);
276         tmp = orig & ~mask;
277         tmp |= (val & mask);
278
279         if (tmp != orig)
280                 writel(tmp, base + reg);
281 }
282
283 /* TCHANRT */
284 static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
285 {
286         if (!tchan)
287                 return 0;
288         return udma_read(tchan->reg_rt, reg);
289 }
290
291 static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
292                                       u32 val)
293 {
294         if (!tchan)
295                 return;
296         udma_write(tchan->reg_rt, reg, val);
297 }
298
299 static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
300                                             u32 mask, u32 val)
301 {
302         if (!tchan)
303                 return;
304         udma_update_bits(tchan->reg_rt, reg, mask, val);
305 }
306
307 /* RCHANRT */
308 static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
309 {
310         if (!rchan)
311                 return 0;
312         return udma_read(rchan->reg_rt, reg);
313 }
314
315 static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
316                                       u32 val)
317 {
318         if (!rchan)
319                 return;
320         udma_write(rchan->reg_rt, reg, val);
321 }
322
323 static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
324                                             u32 mask, u32 val)
325 {
326         if (!rchan)
327                 return;
328         udma_update_bits(rchan->reg_rt, reg, mask, val);
329 }
330
331 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
332 {
333         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
334
335         dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
336         return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
337                                               tisci_rm->tisci_navss_dev_id,
338                                               src_thread, dst_thread);
339 }
340
341 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
342                              u32 dst_thread)
343 {
344         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
345
346         dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
347         return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
348                                                 tisci_rm->tisci_navss_dev_id,
349                                                 src_thread, dst_thread);
350 }
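/*
 * Note: both helpers above unconditionally OR K3_PSIL_DST_THREAD_ID_OFFSET
 * into the destination thread, so the remote end is always addressed as a
 * PSI-L destination thread. The pairing itself is carried out by TI-SCI
 * firmware via the PSI-L ops of the NAVSS device.
 */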
351
352 static void udma_reset_uchan(struct udma_chan *uc)
353 {
354         memset(&uc->config, 0, sizeof(uc->config));
355         uc->config.remote_thread_id = -1;
356         uc->state = UDMA_CHAN_IS_IDLE;
357 }
358
359 static void udma_dump_chan_stdata(struct udma_chan *uc)
360 {
361         struct device *dev = uc->ud->dev;
362         u32 offset;
363         int i;
364
365         if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
366                 dev_dbg(dev, "TCHAN State data:\n");
367                 for (i = 0; i < 32; i++) {
368                         offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
369                         dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
370                                 udma_tchanrt_read(uc->tchan, offset));
371                 }
372         }
373
374         if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
375                 dev_dbg(dev, "RCHAN State data:\n");
376                 for (i = 0; i < 32; i++) {
377                         offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
378                         dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
379                                 udma_rchanrt_read(uc->rchan, offset));
380                 }
381         }
382 }
383
384 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
385                                                     int idx)
386 {
387         return d->hwdesc[idx].cppi5_desc_paddr;
388 }
389
390 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
391 {
392         return d->hwdesc[idx].cppi5_desc_vaddr;
393 }
394
395 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
396                                                    dma_addr_t paddr)
397 {
398         struct udma_desc *d = uc->terminated_desc;
399
400         if (d) {
401                 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
402                                                                    d->desc_idx);
403
404                 if (desc_paddr != paddr)
405                         d = NULL;
406         }
407
408         if (!d) {
409                 d = uc->desc;
410                 if (d) {
411                         dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
412                                                                 d->desc_idx);
413
414                         if (desc_paddr != paddr)
415                                 d = NULL;
416                 }
417         }
418
419         return d;
420 }
421
422 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
423 {
424         if (uc->use_dma_pool) {
425                 int i;
426
427                 for (i = 0; i < d->hwdesc_count; i++) {
428                         if (!d->hwdesc[i].cppi5_desc_vaddr)
429                                 continue;
430
431                         dma_pool_free(uc->hdesc_pool,
432                                       d->hwdesc[i].cppi5_desc_vaddr,
433                                       d->hwdesc[i].cppi5_desc_paddr);
434
435                         d->hwdesc[i].cppi5_desc_vaddr = NULL;
436                 }
437         } else if (d->hwdesc[0].cppi5_desc_vaddr) {
438                 struct udma_dev *ud = uc->ud;
439
440                 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
441                                   d->hwdesc[0].cppi5_desc_vaddr,
442                                   d->hwdesc[0].cppi5_desc_paddr);
443
444                 d->hwdesc[0].cppi5_desc_vaddr = NULL;
445         }
446 }
447
448 static void udma_purge_desc_work(struct work_struct *work)
449 {
450         struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
451         struct virt_dma_desc *vd, *_vd;
452         unsigned long flags;
453         LIST_HEAD(head);
454
455         spin_lock_irqsave(&ud->lock, flags);
456         list_splice_tail_init(&ud->desc_to_purge, &head);
457         spin_unlock_irqrestore(&ud->lock, flags);
458
459         list_for_each_entry_safe(vd, _vd, &head, node) {
460                 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
461                 struct udma_desc *d = to_udma_desc(&vd->tx);
462
463                 udma_free_hwdesc(uc, d);
464                 list_del(&vd->node);
465                 kfree(d);
466         }
467
468         /* If more to purge, schedule the work again */
469         if (!list_empty(&ud->desc_to_purge))
470                 schedule_work(&ud->purge_work);
471 }
472
473 static void udma_desc_free(struct virt_dma_desc *vd)
474 {
475         struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
476         struct udma_chan *uc = to_udma_chan(vd->tx.chan);
477         struct udma_desc *d = to_udma_desc(&vd->tx);
478         unsigned long flags;
479
480         if (uc->terminated_desc == d)
481                 uc->terminated_desc = NULL;
482
483         if (uc->use_dma_pool) {
484                 udma_free_hwdesc(uc, d);
485                 kfree(d);
486                 return;
487         }
488
489         spin_lock_irqsave(&ud->lock, flags);
490         list_add_tail(&vd->node, &ud->desc_to_purge);
491         spin_unlock_irqrestore(&ud->lock, flags);
492
493         schedule_work(&ud->purge_work);
494 }
495
496 static bool udma_is_chan_running(struct udma_chan *uc)
497 {
498         u32 trt_ctl = 0;
499         u32 rrt_ctl = 0;
500
501         if (uc->tchan)
502                 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
503         if (uc->rchan)
504                 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
505
506         if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
507                 return true;
508
509         return false;
510 }
511
512 static bool udma_is_chan_paused(struct udma_chan *uc)
513 {
514         u32 val, pause_mask;
515
516         switch (uc->config.dir) {
517         case DMA_DEV_TO_MEM:
518                 val = udma_rchanrt_read(uc->rchan,
519                                         UDMA_RCHAN_RT_PEER_RT_EN_REG);
520                 pause_mask = UDMA_PEER_RT_EN_PAUSE;
521                 break;
522         case DMA_MEM_TO_DEV:
523                 val = udma_tchanrt_read(uc->tchan,
524                                         UDMA_TCHAN_RT_PEER_RT_EN_REG);
525                 pause_mask = UDMA_PEER_RT_EN_PAUSE;
526                 break;
527         case DMA_MEM_TO_MEM:
528                 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
529                 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
530                 break;
531         default:
532                 return false;
533         }
534
535         if (val & pause_mask)
536                 return true;
537
538         return false;
539 }
540
541 static void udma_sync_for_device(struct udma_chan *uc, int idx)
542 {
543         struct udma_desc *d = uc->desc;
544
545         if (uc->cyclic && uc->config.pkt_mode) {
546                 dma_sync_single_for_device(uc->ud->dev,
547                                            d->hwdesc[idx].cppi5_desc_paddr,
548                                            d->hwdesc[idx].cppi5_desc_size,
549                                            DMA_TO_DEVICE);
550         } else {
551                 int i;
552
553                 for (i = 0; i < d->hwdesc_count; i++) {
554                         if (!d->hwdesc[i].cppi5_desc_vaddr)
555                                 continue;
556
557                         dma_sync_single_for_device(uc->ud->dev,
558                                                 d->hwdesc[i].cppi5_desc_paddr,
559                                                 d->hwdesc[i].cppi5_desc_size,
560                                                 DMA_TO_DEVICE);
561                 }
562         }
563 }
564
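/*
 * The rx_flush dummy descriptors are indexed by the channel's packet mode:
 * hwdescs[0] is used for TR mode channels, hwdescs[1] for packet mode
 * channels, which is why the pkt_mode bool can be used directly as index.
 */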
565 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
566 {
567         return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
568 }
569
570 static int udma_push_to_ring(struct udma_chan *uc, int idx)
571 {
572         struct udma_desc *d = uc->desc;
573         struct k3_ring *ring = NULL;
574         dma_addr_t paddr;
575         int ret;
576
577         switch (uc->config.dir) {
578         case DMA_DEV_TO_MEM:
579                 ring = uc->rflow->fd_ring;
580                 break;
581         case DMA_MEM_TO_DEV:
582         case DMA_MEM_TO_MEM:
583                 ring = uc->tchan->t_ring;
584                 break;
585         default:
586                 return -EINVAL;
587         }
588
589         /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
590         if (idx == -1) {
591                 paddr = udma_get_rx_flush_hwdesc_paddr(uc);
592         } else {
593                 paddr = udma_curr_cppi5_desc_paddr(d, idx);
594
595                 wmb(); /* Ensure that writes are not moved over this point */
596                 udma_sync_for_device(uc, idx);
597         }
598
599         ret = k3_ringacc_ring_push(ring, &paddr);
600         if (!ret)
601                 uc->in_ring_cnt++;
602
603         return ret;
604 }
605
606 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
607 {
608         if (uc->config.dir != DMA_DEV_TO_MEM)
609                 return false;
610
611         if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
612                 return true;
613
614         return false;
615 }
616
617 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
618 {
619         struct k3_ring *ring = NULL;
620         int ret = -ENOENT;
621
622         switch (uc->config.dir) {
623         case DMA_DEV_TO_MEM:
624                 ring = uc->rflow->r_ring;
625                 break;
626         case DMA_MEM_TO_DEV:
627         case DMA_MEM_TO_MEM:
628                 ring = uc->tchan->tc_ring;
629                 break;
630         default:
631                 break;
632         }
633
634         if (ring && k3_ringacc_ring_get_occ(ring)) {
635                 struct udma_desc *d = NULL;
636
637                 ret = k3_ringacc_ring_pop(ring, addr);
638                 if (ret)
639                         return ret;
640
641                 /* Teardown completion */
642                 if (cppi5_desc_is_tdcm(*addr))
643                         return ret;
644
645                 /* Check for flush descriptor */
646                 if (udma_desc_is_rx_flush(uc, *addr))
647                         return -ENOENT;
648
649                 d = udma_udma_desc_from_paddr(uc, *addr);
650
651                 if (d)
652                         dma_sync_single_for_cpu(uc->ud->dev, *addr,
653                                                 d->hwdesc[0].cppi5_desc_size,
654                                                 DMA_FROM_DEVICE);
655                 rmb(); /* Ensure that reads are not moved before this point */
656
657                 if (!ret)
658                         uc->in_ring_cnt--;
659         }
660
661         return ret;
662 }
663
664 static void udma_reset_rings(struct udma_chan *uc)
665 {
666         struct k3_ring *ring1 = NULL;
667         struct k3_ring *ring2 = NULL;
668
669         switch (uc->config.dir) {
670         case DMA_DEV_TO_MEM:
671                 if (uc->rchan) {
672                         ring1 = uc->rflow->fd_ring;
673                         ring2 = uc->rflow->r_ring;
674                 }
675                 break;
676         case DMA_MEM_TO_DEV:
677         case DMA_MEM_TO_MEM:
678                 if (uc->tchan) {
679                         ring1 = uc->tchan->t_ring;
680                         ring2 = uc->tchan->tc_ring;
681                 }
682                 break;
683         default:
684                 break;
685         }
686
687         if (ring1)
688                 k3_ringacc_ring_reset_dma(ring1,
689                                           k3_ringacc_ring_get_occ(ring1));
690         if (ring2)
691                 k3_ringacc_ring_reset(ring2);
692
693         /* make sure we are not leaking memory due to a stalled descriptor */
694         if (uc->terminated_desc) {
695                 udma_desc_free(&uc->terminated_desc->vd);
696                 uc->terminated_desc = NULL;
697         }
698
699         uc->in_ring_cnt = 0;
700 }
701
702 static void udma_reset_counters(struct udma_chan *uc)
703 {
704         u32 val;
705
706         if (uc->tchan) {
707                 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
708                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
709
710                 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
711                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
712
713                 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
714                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
715
716                 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
717                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
718         }
719
720         if (uc->rchan) {
721                 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
722                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
723
724                 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
725                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
726
727                 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
728                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
729
730                 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
731                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
732         }
733
734         uc->bcnt = 0;
735 }
736
737 static int udma_reset_chan(struct udma_chan *uc, bool hard)
738 {
739         switch (uc->config.dir) {
740         case DMA_DEV_TO_MEM:
741                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
742                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
743                 break;
744         case DMA_MEM_TO_DEV:
745                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
746                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
747                 break;
748         case DMA_MEM_TO_MEM:
749                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
750                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
751                 break;
752         default:
753                 return -EINVAL;
754         }
755
756         /* Reset all counters */
757         udma_reset_counters(uc);
758
759         /* Hard reset: re-initialize the channel to fully reset it */
760         if (hard) {
761                 struct udma_chan_config ucc_backup;
762                 int ret;
763
764                 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
765                 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
766
767                 /* restore the channel configuration */
768                 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
769                 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
770                 if (ret)
771                         return ret;
772
773                 /*
774                  * Setting forced teardown after forced reset helps recovering
775                  * the rchan.
776                  */
777                 if (uc->config.dir == DMA_DEV_TO_MEM)
778                         udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
779                                            UDMA_CHAN_RT_CTL_EN |
780                                            UDMA_CHAN_RT_CTL_TDOWN |
781                                            UDMA_CHAN_RT_CTL_FTDOWN);
782         }
783         uc->state = UDMA_CHAN_IS_IDLE;
784
785         return 0;
786 }
787
788 static void udma_start_desc(struct udma_chan *uc)
789 {
790         struct udma_chan_config *ucc = &uc->config;
791
792         if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
793                 int i;
794
795                 /* Push all descriptors to ring for packet mode cyclic or RX */
796                 for (i = 0; i < uc->desc->sglen; i++)
797                         udma_push_to_ring(uc, i);
798         } else {
799                 udma_push_to_ring(uc, 0);
800         }
801 }
802
803 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
804 {
805         /* Only PDMAs have staticTR */
806         if (uc->config.ep_type == PSIL_EP_NATIVE)
807                 return false;
808
809         /* Check if the staticTR configuration has changed for TX */
810         if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
811                 return true;
812
813         return false;
814 }
815
816 static int udma_start(struct udma_chan *uc)
817 {
818         struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
819
820         if (!vd) {
821                 uc->desc = NULL;
822                 return -ENOENT;
823         }
824
825         list_del(&vd->node);
826
827         uc->desc = to_udma_desc(&vd->tx);
828
829         /* Channel is already running and does not need reconfiguration */
830         if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
831                 udma_start_desc(uc);
832                 goto out;
833         }
834
835         /* Make sure that we clear the teardown bit, if it is set */
836         udma_reset_chan(uc, false);
837
838         /* Push descriptors before we start the channel */
839         udma_start_desc(uc);
840
841         switch (uc->desc->dir) {
842         case DMA_DEV_TO_MEM:
843                 /* Config remote TR */
844                 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
845                         u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
846                                   PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
847                         const struct udma_match_data *match_data =
848                                                         uc->ud->match_data;
849
850                         if (uc->config.enable_acc32)
851                                 val |= PDMA_STATIC_TR_XY_ACC32;
852                         if (uc->config.enable_burst)
853                                 val |= PDMA_STATIC_TR_XY_BURST;
854
855                         udma_rchanrt_write(uc->rchan,
856                                 UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
857
858                         udma_rchanrt_write(uc->rchan,
859                                 UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
860                                 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
861                                                  match_data->statictr_z_mask));
862
863                         /* save the current staticTR configuration */
864                         memcpy(&uc->static_tr, &uc->desc->static_tr,
865                                sizeof(uc->static_tr));
866                 }
867
868                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
869                                    UDMA_CHAN_RT_CTL_EN);
870
871                 /* Enable remote */
872                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
873                                    UDMA_PEER_RT_EN_ENABLE);
874
875                 break;
876         case DMA_MEM_TO_DEV:
877                 /* Config remote TR */
878                 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
879                         u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
880                                   PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
881
882                         if (uc->config.enable_acc32)
883                                 val |= PDMA_STATIC_TR_XY_ACC32;
884                         if (uc->config.enable_burst)
885                                 val |= PDMA_STATIC_TR_XY_BURST;
886
887                         udma_tchanrt_write(uc->tchan,
888                                 UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
889
890                         /* save the current staticTR configuration */
891                         memcpy(&uc->static_tr, &uc->desc->static_tr,
892                                sizeof(uc->static_tr));
893                 }
894
895                 /* Enable remote */
896                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
897                                    UDMA_PEER_RT_EN_ENABLE);
898
899                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
900                                    UDMA_CHAN_RT_CTL_EN);
901
902                 break;
903         case DMA_MEM_TO_MEM:
904                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
905                                    UDMA_CHAN_RT_CTL_EN);
906                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
907                                    UDMA_CHAN_RT_CTL_EN);
908
909                 break;
910         default:
911                 return -EINVAL;
912         }
913
914         uc->state = UDMA_CHAN_IS_ACTIVE;
915 out:
916
917         return 0;
918 }
919
920 static int udma_stop(struct udma_chan *uc)
921 {
922         enum udma_chan_state old_state = uc->state;
923
924         uc->state = UDMA_CHAN_IS_TERMINATING;
925         reinit_completion(&uc->teardown_completed);
926
927         switch (uc->config.dir) {
928         case DMA_DEV_TO_MEM:
929                 if (!uc->cyclic && !uc->desc)
930                         udma_push_to_ring(uc, -1);
931
932                 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
933                                    UDMA_PEER_RT_EN_ENABLE |
934                                    UDMA_PEER_RT_EN_TEARDOWN);
935                 break;
936         case DMA_MEM_TO_DEV:
937                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
938                                    UDMA_PEER_RT_EN_ENABLE |
939                                    UDMA_PEER_RT_EN_FLUSH);
940                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
941                                    UDMA_CHAN_RT_CTL_EN |
942                                    UDMA_CHAN_RT_CTL_TDOWN);
943                 break;
944         case DMA_MEM_TO_MEM:
945                 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
946                                    UDMA_CHAN_RT_CTL_EN |
947                                    UDMA_CHAN_RT_CTL_TDOWN);
948                 break;
949         default:
950                 uc->state = old_state;
951                 complete_all(&uc->teardown_completed);
952                 return -EINVAL;
953         }
954
955         return 0;
956 }
957
958 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
959 {
960         struct udma_desc *d = uc->desc;
961         struct cppi5_host_desc_t *h_desc;
962
963         h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
964         cppi5_hdesc_reset_to_original(h_desc);
965         udma_push_to_ring(uc, d->desc_idx);
966         d->desc_idx = (d->desc_idx + 1) % d->sglen;
967 }
968
969 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
970 {
971         struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
972
973         memcpy(d->metadata, h_desc->epib, d->metadata_size);
974 }
975
976 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
977 {
978         u32 peer_bcnt, bcnt;
979
980         /* Only TX towards PDMA is affected */
981         if (uc->config.ep_type == PSIL_EP_NATIVE ||
982             uc->config.dir != DMA_MEM_TO_DEV)
983                 return true;
984
985         peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
986         bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
987
988         /* Transfer is incomplete, store current residue and time stamp */
989         if (peer_bcnt < bcnt) {
990                 uc->tx_drain.residue = bcnt - peer_bcnt;
991                 uc->tx_drain.tstamp = ktime_get();
992                 return false;
993         }
994
995         return true;
996 }
997
998 static void udma_check_tx_completion(struct work_struct *work)
999 {
1000         struct udma_chan *uc = container_of(work, typeof(*uc),
1001                                             tx_drain.work.work);
1002         bool desc_done = true;
1003         u32 residue_diff;
1004         ktime_t time_diff;
1005         unsigned long delay;
1006
1007         while (1) {
1008                 if (uc->desc) {
1009                         /* Get previous residue and time stamp */
1010                         residue_diff = uc->tx_drain.residue;
1011                         time_diff = uc->tx_drain.tstamp;
1012                         /*
1013                          * Get current residue and time stamp or see if
1014                          * transfer is complete
1015                          */
1016                         desc_done = udma_is_desc_really_done(uc, uc->desc);
1017                 }
1018
1019                 if (!desc_done) {
1020                         /*
1021                          * Find the time delta and residue delta w.r.t
1022                          * previous poll
1023                          */
1024                         time_diff = ktime_sub(uc->tx_drain.tstamp,
1025                                               time_diff) + 1;
1026                         residue_diff -= uc->tx_drain.residue;
1027                         if (residue_diff) {
1028                                 /*
1029                                  * Try to guess when we should check
1030                                  * next time by calculating rate at
1031                                  * which data is being drained at the
1032                                  * peer device
1033                                  */
1034                                 delay = (time_diff / residue_diff) *
1035                                         uc->tx_drain.residue;
1036                         } else {
1037                                 /* No progress, check again in 1 second  */
1038                                 schedule_delayed_work(&uc->tx_drain.work, HZ);
1039                                 break;
1040                         }
1041
1042                         usleep_range(ktime_to_us(delay),
1043                                      ktime_to_us(delay) + 10);
1044                         continue;
1045                 }
1046
1047                 if (uc->desc) {
1048                         struct udma_desc *d = uc->desc;
1049
1050                         uc->bcnt += d->residue;
1051                         udma_start(uc);
1052                         vchan_cookie_complete(&d->vd);
1053                         break;
1054                 }
1055
1056                 break;
1057         }
1058 }
1059
1060 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1061 {
1062         struct udma_chan *uc = data;
1063         struct udma_desc *d;
1064         unsigned long flags;
1065         dma_addr_t paddr = 0;
1066
1067         if (udma_pop_from_ring(uc, &paddr) || !paddr)
1068                 return IRQ_HANDLED;
1069
1070         spin_lock_irqsave(&uc->vc.lock, flags);
1071
1072         /* Teardown completion message */
1073         if (cppi5_desc_is_tdcm(paddr)) {
1074                 /* Compensate our internal pop/push counter */
1075                 uc->in_ring_cnt++;
1076
1077                 complete_all(&uc->teardown_completed);
1078
1079                 if (uc->terminated_desc) {
1080                         udma_desc_free(&uc->terminated_desc->vd);
1081                         uc->terminated_desc = NULL;
1082                 }
1083
1084                 if (!uc->desc)
1085                         udma_start(uc);
1086
1087                 goto out;
1088         }
1089
1090         d = udma_udma_desc_from_paddr(uc, paddr);
1091
1092         if (d) {
1093                 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1094                                                                    d->desc_idx);
1095                 if (desc_paddr != paddr) {
1096                         dev_err(uc->ud->dev, "not matching descriptors!\n");
1097                         goto out;
1098                 }
1099
1100                 if (d == uc->desc) {
1101                         /* active descriptor */
1102                         if (uc->cyclic) {
1103                                 udma_cyclic_packet_elapsed(uc);
1104                                 vchan_cyclic_callback(&d->vd);
1105                         } else {
1106                                 if (udma_is_desc_really_done(uc, d)) {
1107                                         uc->bcnt += d->residue;
1108                                         udma_start(uc);
1109                                         vchan_cookie_complete(&d->vd);
1110                                 } else {
1111                                         schedule_delayed_work(&uc->tx_drain.work,
1112                                                               0);
1113                                 }
1114                         }
1115                 } else {
1116                         /*
1117                          * terminated descriptor, mark the descriptor as
1118                          * completed to update the channel's cookie marker
1119                          */
1120                         dma_cookie_complete(&d->vd.tx);
1121                 }
1122         }
1123 out:
1124         spin_unlock_irqrestore(&uc->vc.lock, flags);
1125
1126         return IRQ_HANDLED;
1127 }
1128
1129 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1130 {
1131         struct udma_chan *uc = data;
1132         struct udma_desc *d;
1133         unsigned long flags;
1134
1135         spin_lock_irqsave(&uc->vc.lock, flags);
1136         d = uc->desc;
1137         if (d) {
1138                 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1139
1140                 if (uc->cyclic) {
1141                         vchan_cyclic_callback(&d->vd);
1142                 } else {
1143                         /* TODO: figure out the real amount of data */
1144                         uc->bcnt += d->residue;
1145                         udma_start(uc);
1146                         vchan_cookie_complete(&d->vd);
1147                 }
1148         }
1149
1150         spin_unlock_irqrestore(&uc->vc.lock, flags);
1151
1152         return IRQ_HANDLED;
1153 }
1154
1155 /**
1156  * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1157  * @ud: UDMA device
1158  * @from: Start the search from this flow id number
1159  * @cnt: Number of consecutive flow ids to allocate
1160  *
1161  * Allocate a range of RX flow ids for future use; those flows can then be
1162  * requested only by explicit flow id number. If @from is -1, the first free
1163  * range is used. If @from is a non-negative value, allocation is forced to
1164  * exactly that range of flows.
1165  *
1166  * Returns -ENOMEM if no free range can be found,
1167  * -EEXIST if the requested range is busy,
1168  * -EINVAL if invalid input values are passed,
1169  * or the allocated flow id (start of the range) on success.
1170  */
1171 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1172 {
1173         int start, tmp_from;
1174         DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1175
1176         tmp_from = from;
1177         if (tmp_from < 0)
1178                 tmp_from = ud->rchan_cnt;
1179         /* default flows can't be allocated; they are accessible only by id */
1180         if (tmp_from < ud->rchan_cnt)
1181                 return -EINVAL;
1182
1183         if (tmp_from + cnt > ud->rflow_cnt)
1184                 return -EINVAL;
1185
1186         bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1187                   ud->rflow_cnt);
1188
1189         start = bitmap_find_next_zero_area(tmp,
1190                                            ud->rflow_cnt,
1191                                            tmp_from, cnt, 0);
1192         if (start >= ud->rflow_cnt)
1193                 return -ENOMEM;
1194
1195         if (from >= 0 && start != from)
1196                 return -EEXIST;
1197
1198         bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1199         return start;
1200 }
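/*
 * For example (illustrative): __udma_alloc_gp_rflow_range(ud, -1, 4) picks
 * the first free block of four GP flows above the default per-rchan flows,
 * while __udma_alloc_gp_rflow_range(ud, 100, 4) succeeds only if flows
 * 100..103 form a free GP range (a busy range returns -EEXIST).
 */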
1201
1202 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1203 {
1204         if (from < ud->rchan_cnt)
1205                 return -EINVAL;
1206         if (from + cnt > ud->rflow_cnt)
1207                 return -EINVAL;
1208
1209         bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1210         return 0;
1211 }
1212
1213 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1214 {
1215         /*
1216          * An rflow can be requested by ID for any rflow that is not in use,
1217          * on the assumption that the caller knows what it is doing.
1218          * TI-SCI FW will perform an additional permission check anyway, so
1219          * it's safe.
1220          */
1221
1222         if (id < 0 || id >= ud->rflow_cnt)
1223                 return ERR_PTR(-ENOENT);
1224
1225         if (test_bit(id, ud->rflow_in_use))
1226                 return ERR_PTR(-ENOENT);
1227
1228         /* GP rflow has to be allocated first */
1229         if (!test_bit(id, ud->rflow_gp_map) &&
1230             !test_bit(id, ud->rflow_gp_map_allocated))
1231                 return ERR_PTR(-EINVAL);
1232
1233         dev_dbg(ud->dev, "get rflow%d\n", id);
1234         set_bit(id, ud->rflow_in_use);
1235         return &ud->rflows[id];
1236 }
1237
1238 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1239 {
1240         if (!test_bit(rflow->id, ud->rflow_in_use)) {
1241                 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1242                 return;
1243         }
1244
1245         dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1246         clear_bit(rflow->id, ud->rflow_in_use);
1247 }
1248
1249 #define UDMA_RESERVE_RESOURCE(res)                                      \
1250 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,     \
1251                                                enum udma_tp_level tpl,  \
1252                                                int id)                  \
1253 {                                                                       \
1254         if (id >= 0) {                                                  \
1255                 if (test_bit(id, ud->res##_map)) {                      \
1256                         dev_err(ud->dev, "%s%d is in use\n", #res, id);  \
1257                         return ERR_PTR(-ENOENT);                        \
1258                 }                                                       \
1259         } else {                                                        \
1260                 int start;                                              \
1261                                                                         \
1262                 if (tpl >= ud->match_data->tpl_levels)                  \
1263                         tpl = ud->match_data->tpl_levels - 1;           \
1264                                                                         \
1265                 start = ud->match_data->level_start_idx[tpl];           \
1266                                                                         \
1267                 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,   \
1268                                         start);                         \
1269                 if (id == ud->res##_cnt) {                              \
1270                         return ERR_PTR(-ENOENT);                        \
1271                 }                                                       \
1272         }                                                               \
1273                                                                         \
1274         set_bit(id, ud->res##_map);                                     \
1275         return &ud->res##s[id];                                         \
1276 }
1277
1278 UDMA_RESERVE_RESOURCE(tchan);
1279 UDMA_RESERVE_RESOURCE(rchan);
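/*
 * For reference, UDMA_RESERVE_RESOURCE(tchan) above expands to roughly the
 * following helper (sketch for readability only, not additional code):
 *
 *   static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *                                                  enum udma_tp_level tpl,
 *                                                  int id)
 *   {
 *           if (id >= 0) {
 *                   if (test_bit(id, ud->tchan_map)) {
 *                           dev_err(ud->dev, "tchan%d is in use\n", id);
 *                           return ERR_PTR(-ENOENT);
 *                   }
 *           } else {
 *                   int start;
 *
 *                   if (tpl >= ud->match_data->tpl_levels)
 *                           tpl = ud->match_data->tpl_levels - 1;
 *
 *                   start = ud->match_data->level_start_idx[tpl];
 *
 *                   id = find_next_zero_bit(ud->tchan_map, ud->tchan_cnt,
 *                                           start);
 *                   if (id == ud->tchan_cnt)
 *                           return ERR_PTR(-ENOENT);
 *           }
 *
 *           set_bit(id, ud->tchan_map);
 *           return &ud->tchans[id];
 *   }
 */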
1280
1281 static int udma_get_tchan(struct udma_chan *uc)
1282 {
1283         struct udma_dev *ud = uc->ud;
1284
1285         if (uc->tchan) {
1286                 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1287                         uc->id, uc->tchan->id);
1288                 return 0;
1289         }
1290
1291         uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1292         if (IS_ERR(uc->tchan))
1293                 return PTR_ERR(uc->tchan);
1294
1295         return 0;
1296 }
1297
1298 static int udma_get_rchan(struct udma_chan *uc)
1299 {
1300         struct udma_dev *ud = uc->ud;
1301
1302         if (uc->rchan) {
1303                 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1304                         uc->id, uc->rchan->id);
1305                 return 0;
1306         }
1307
1308         uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1309         if (IS_ERR(uc->rchan))
1310                 return PTR_ERR(uc->rchan);
1311
1312         return 0;
1313 }
1314
1315 static int udma_get_chan_pair(struct udma_chan *uc)
1316 {
1317         struct udma_dev *ud = uc->ud;
1318         const struct udma_match_data *match_data = ud->match_data;
1319         int chan_id, end;
1320
1321         if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1322                 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1323                          uc->id, uc->tchan->id);
1324                 return 0;
1325         }
1326
1327         if (uc->tchan) {
1328                 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1329                         uc->id, uc->tchan->id);
1330                 return -EBUSY;
1331         } else if (uc->rchan) {
1332                 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1333                         uc->id, uc->rchan->id);
1334                 return -EBUSY;
1335         }
1336
1337         /* Can be optimized, but let's have it like this for now */
1338         end = min(ud->tchan_cnt, ud->rchan_cnt);
1339         /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1340         chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
1341         for (; chan_id < end; chan_id++) {
1342                 if (!test_bit(chan_id, ud->tchan_map) &&
1343                     !test_bit(chan_id, ud->rchan_map))
1344                         break;
1345         }
1346
1347         if (chan_id == end)
1348                 return -ENOENT;
1349
1350         set_bit(chan_id, ud->tchan_map);
1351         set_bit(chan_id, ud->rchan_map);
1352         uc->tchan = &ud->tchans[chan_id];
1353         uc->rchan = &ud->rchans[chan_id];
1354
1355         return 0;
1356 }
1357
1358 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1359 {
1360         struct udma_dev *ud = uc->ud;
1361
1362         if (!uc->rchan) {
1363                 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1364                 return -EINVAL;
1365         }
1366
1367         if (uc->rflow) {
1368                 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1369                         uc->id, uc->rflow->id);
1370                 return 0;
1371         }
1372
1373         uc->rflow = __udma_get_rflow(ud, flow_id);
1374         if (IS_ERR(uc->rflow))
1375                 return PTR_ERR(uc->rflow);
1376
1377         return 0;
1378 }
1379
1380 static void udma_put_rchan(struct udma_chan *uc)
1381 {
1382         struct udma_dev *ud = uc->ud;
1383
1384         if (uc->rchan) {
1385                 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1386                         uc->rchan->id);
1387                 clear_bit(uc->rchan->id, ud->rchan_map);
1388                 uc->rchan = NULL;
1389         }
1390 }
1391
1392 static void udma_put_tchan(struct udma_chan *uc)
1393 {
1394         struct udma_dev *ud = uc->ud;
1395
1396         if (uc->tchan) {
1397                 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1398                         uc->tchan->id);
1399                 clear_bit(uc->tchan->id, ud->tchan_map);
1400                 uc->tchan = NULL;
1401         }
1402 }
1403
1404 static void udma_put_rflow(struct udma_chan *uc)
1405 {
1406         struct udma_dev *ud = uc->ud;
1407
1408         if (uc->rflow) {
1409                 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1410                         uc->rflow->id);
1411                 __udma_put_rflow(ud, uc->rflow);
1412                 uc->rflow = NULL;
1413         }
1414 }
1415
1416 static void udma_free_tx_resources(struct udma_chan *uc)
1417 {
1418         if (!uc->tchan)
1419                 return;
1420
1421         k3_ringacc_ring_free(uc->tchan->t_ring);
1422         k3_ringacc_ring_free(uc->tchan->tc_ring);
1423         uc->tchan->t_ring = NULL;
1424         uc->tchan->tc_ring = NULL;
1425
1426         udma_put_tchan(uc);
1427 }
1428
1429 static int udma_alloc_tx_resources(struct udma_chan *uc)
1430 {
1431         struct k3_ring_cfg ring_cfg;
1432         struct udma_dev *ud = uc->ud;
1433         int ret;
1434
1435         ret = udma_get_tchan(uc);
1436         if (ret)
1437                 return ret;
1438
1439         uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
1440                                                     uc->tchan->id, 0);
1441         if (!uc->tchan->t_ring) {
1442                 ret = -EBUSY;
1443                 goto err_tx_ring;
1444         }
1445
1446         uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1447         if (!uc->tchan->tc_ring) {
1448                 ret = -EBUSY;
1449                 goto err_txc_ring;
1450         }
1451
1452         memset(&ring_cfg, 0, sizeof(ring_cfg));
1453         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1454         ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1455         ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1456
1457         ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
1458         ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
1459
1460         if (ret)
1461                 goto err_ringcfg;
1462
1463         return 0;
1464
1465 err_ringcfg:
1466         k3_ringacc_ring_free(uc->tchan->tc_ring);
1467         uc->tchan->tc_ring = NULL;
1468 err_txc_ring:
1469         k3_ringacc_ring_free(uc->tchan->t_ring);
1470         uc->tchan->t_ring = NULL;
1471 err_tx_ring:
1472         udma_put_tchan(uc);
1473
1474         return ret;
1475 }
1476
1477 static void udma_free_rx_resources(struct udma_chan *uc)
1478 {
1479         if (!uc->rchan)
1480                 return;
1481
1482         if (uc->rflow) {
1483                 struct udma_rflow *rflow = uc->rflow;
1484
1485                 k3_ringacc_ring_free(rflow->fd_ring);
1486                 k3_ringacc_ring_free(rflow->r_ring);
1487                 rflow->fd_ring = NULL;
1488                 rflow->r_ring = NULL;
1489
1490                 udma_put_rflow(uc);
1491         }
1492
1493         udma_put_rchan(uc);
1494 }
1495
1496 static int udma_alloc_rx_resources(struct udma_chan *uc)
1497 {
1498         struct udma_dev *ud = uc->ud;
1499         struct k3_ring_cfg ring_cfg;
1500         struct udma_rflow *rflow;
1501         int fd_ring_id;
1502         int ret;
1503
1504         ret = udma_get_rchan(uc);
1505         if (ret)
1506                 return ret;
1507
1508         /* For MEM_TO_MEM we don't need rflow or rings */
1509         if (uc->config.dir == DMA_MEM_TO_MEM)
1510                 return 0;
1511
1512         ret = udma_get_rflow(uc, uc->rchan->id);
1513         if (ret) {
1514                 ret = -EBUSY;
1515                 goto err_rflow;
1516         }
1517
1518         rflow = uc->rflow;
1519         fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1520         rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
1521         if (!rflow->fd_ring) {
1522                 ret = -EBUSY;
1523                 goto err_rx_ring;
1524         }
1525
1526         rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
1527         if (!rflow->r_ring) {
1528                 ret = -EBUSY;
1529                 goto err_rxc_ring;
1530         }
1531
1532         memset(&ring_cfg, 0, sizeof(ring_cfg));
1533
1534         if (uc->config.pkt_mode)
1535                 ring_cfg.size = SG_MAX_SEGMENTS;
1536         else
1537                 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1538
1539         ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1540         ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1541
1542         ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1543         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1544         ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1545
1546         if (ret)
1547                 goto err_ringcfg;
1548
1549         return 0;
1550
1551 err_ringcfg:
1552         k3_ringacc_ring_free(rflow->r_ring);
1553         rflow->r_ring = NULL;
1554 err_rxc_ring:
1555         k3_ringacc_ring_free(rflow->fd_ring);
1556         rflow->fd_ring = NULL;
1557 err_rx_ring:
1558         udma_put_rflow(uc);
1559 err_rflow:
1560         udma_put_rchan(uc);
1561
1562         return ret;
1563 }
1564
1565 #define TISCI_TCHAN_VALID_PARAMS (                              \
1566         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1567         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |      \
1568         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |    \
1569         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
1570         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |      \
1571         TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
1572         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
1573
1574 #define TISCI_RCHAN_VALID_PARAMS (                              \
1575         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1576         TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
1577         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
1578         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
1579         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |    \
1580         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |     \
1581         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |    \
1582         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
1583
1584 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1585 {
1586         struct udma_dev *ud = uc->ud;
1587         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1588         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1589         struct udma_tchan *tchan = uc->tchan;
1590         struct udma_rchan *rchan = uc->rchan;
1591         int ret = 0;
1592
1593         /* Non synchronized - mem to mem type of transfer */
1594         int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1595         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1596         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1597
1598         req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1599         req_tx.nav_id = tisci_rm->tisci_dev_id;
1600         req_tx.index = tchan->id;
1601         req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1602         req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1603         req_tx.txcq_qnum = tc_ring;
1604
1605         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1606         if (ret) {
1607                 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1608                 return ret;
1609         }
1610
1611         req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1612         req_rx.nav_id = tisci_rm->tisci_dev_id;
1613         req_rx.index = rchan->id;
1614         req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1615         req_rx.rxcq_qnum = tc_ring;
1616         req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1617
1618         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1619         if (ret)
1620                 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1621
1622         return ret;
1623 }
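/*
 * Note: the tx_fetch_size/rx_fetch_size fields programmed above (and in the
 * TX/RX channel configuration below) are expressed in 32-bit words, which is
 * why the descriptor header sizes in bytes are shifted right by two.
 */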
1624
1625 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1626 {
1627         struct udma_dev *ud = uc->ud;
1628         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1629         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1630         struct udma_tchan *tchan = uc->tchan;
1631         int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1632         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1633         u32 mode, fetch_size;
1634         int ret = 0;
1635
1636         if (uc->config.pkt_mode) {
1637                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1638                 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1639                                                    uc->config.psd_size, 0);
1640         } else {
1641                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1642                 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1643         }
1644
1645         req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1646         req_tx.nav_id = tisci_rm->tisci_dev_id;
1647         req_tx.index = tchan->id;
1648         req_tx.tx_chan_type = mode;
1649         req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1650         req_tx.tx_fetch_size = fetch_size >> 2;
1651         req_tx.txcq_qnum = tc_ring;
1652
1653         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1654         if (ret)
1655                 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1656
1657         return ret;
1658 }
1659
1660 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1661 {
1662         struct udma_dev *ud = uc->ud;
1663         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1664         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1665         struct udma_rchan *rchan = uc->rchan;
1666         int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1667         int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1668         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1669         struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1670         u32 mode, fetch_size;
1671         int ret = 0;
1672
1673         if (uc->config.pkt_mode) {
1674                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1675                 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1676                                                    uc->config.psd_size, 0);
1677         } else {
1678                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1679                 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1680         }
1681
1682         req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1683         req_rx.nav_id = tisci_rm->tisci_dev_id;
1684         req_rx.index = rchan->id;
1685         req_rx.rx_fetch_size = fetch_size >> 2;
1686         req_rx.rxcq_qnum = rx_ring;
1687         req_rx.rx_chan_type = mode;
1688
1689         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1690         if (ret) {
1691                 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1692                 return ret;
1693         }
1694
1695         flow_req.valid_params =
1696                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1697                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1698                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1699                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1700                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1701                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1702                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1703                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1704                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1705                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1706                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1707                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1708                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1709
1710         flow_req.nav_id = tisci_rm->tisci_dev_id;
1711         flow_req.flow_index = rchan->id;
1712
1713         if (uc->config.needs_epib)
1714                 flow_req.rx_einfo_present = 1;
1715         else
1716                 flow_req.rx_einfo_present = 0;
1717         if (uc->config.psd_size)
1718                 flow_req.rx_psinfo_present = 1;
1719         else
1720                 flow_req.rx_psinfo_present = 0;
1721         flow_req.rx_error_handling = 1;
1722         flow_req.rx_dest_qnum = rx_ring;
1723         flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
1724         flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
1725         flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
1726         flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
1727         flow_req.rx_fdq0_sz0_qnum = fd_ring;
1728         flow_req.rx_fdq1_qnum = fd_ring;
1729         flow_req.rx_fdq2_qnum = fd_ring;
1730         flow_req.rx_fdq3_qnum = fd_ring;
1731
1732         ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
1733
1734         if (ret)
1735                 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1736
1737         return ret;
1738 }
1739
1740 static int udma_alloc_chan_resources(struct dma_chan *chan)
1741 {
1742         struct udma_chan *uc = to_udma_chan(chan);
1743         struct udma_dev *ud = to_udma_dev(chan->device);
1744         const struct udma_match_data *match_data = ud->match_data;
1745         struct k3_ring *irq_ring;
1746         u32 irq_udma_idx;
1747         int ret;
1748
1749         if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1750                 uc->use_dma_pool = true;
1751                 /* In case of MEM_TO_MEM we have a maximum of two TRs */
1752                 if (uc->config.dir == DMA_MEM_TO_MEM) {
1753                         uc->config.hdesc_size = cppi5_trdesc_calc_size(
1754                                         sizeof(struct cppi5_tr_type15_t), 2);
1755                         uc->config.pkt_mode = false;
1756                 }
1757         }
1758
1759         if (uc->use_dma_pool) {
1760                 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1761                                                  uc->config.hdesc_size,
1762                                                  ud->desc_align,
1763                                                  0);
1764                 if (!uc->hdesc_pool) {
1765                         dev_err(ud->ddev.dev,
1766                                 "Descriptor pool allocation failed\n");
1767                         uc->use_dma_pool = false;
1768                         return -ENOMEM;
1769                 }
1770         }
1771
1772         /*
1773          * Make sure that the completion is in a known state:
1774          * No teardown, the channel is idle
1775          */
1776         reinit_completion(&uc->teardown_completed);
1777         complete_all(&uc->teardown_completed);
1778         uc->state = UDMA_CHAN_IS_IDLE;
1779
1780         switch (uc->config.dir) {
1781         case DMA_MEM_TO_MEM:
1782                 /* Non synchronized - mem to mem type of transfer */
1783                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1784                         uc->id);
1785
1786                 ret = udma_get_chan_pair(uc);
1787                 if (ret)
1788                         return ret;
1789
1790                 ret = udma_alloc_tx_resources(uc);
1791                 if (ret)
1792                         return ret;
1793
1794                 ret = udma_alloc_rx_resources(uc);
1795                 if (ret) {
1796                         udma_free_tx_resources(uc);
1797                         return ret;
1798                 }
1799
1800                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1801                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1802                                         K3_PSIL_DST_THREAD_ID_OFFSET;
1803
1804                 irq_ring = uc->tchan->tc_ring;
1805                 irq_udma_idx = uc->tchan->id;
1806
1807                 ret = udma_tisci_m2m_channel_config(uc);
1808                 break;
1809         case DMA_MEM_TO_DEV:
1810                 /* Slave transfer synchronized - mem to dev (TX) transfer */
1811                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1812                         uc->id);
1813
1814                 ret = udma_alloc_tx_resources(uc);
1815                 if (ret) {
1816                         uc->config.remote_thread_id = -1;
1817                         return ret;
1818                 }
1819
1820                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1821                 uc->config.dst_thread = uc->config.remote_thread_id;
1822                 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1823
1824                 irq_ring = uc->tchan->tc_ring;
1825                 irq_udma_idx = uc->tchan->id;
1826
1827                 ret = udma_tisci_tx_channel_config(uc);
1828                 break;
1829         case DMA_DEV_TO_MEM:
1830                 /* Slave transfer synchronized - dev to mem (RX) transfer */
1831                 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1832                         uc->id);
1833
1834                 ret = udma_alloc_rx_resources(uc);
1835                 if (ret) {
1836                         uc->config.remote_thread_id = -1;
1837                         return ret;
1838                 }
1839
1840                 uc->config.src_thread = uc->config.remote_thread_id;
1841                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1842                                         K3_PSIL_DST_THREAD_ID_OFFSET;
1843
1844                 irq_ring = uc->rflow->r_ring;
1845                 irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
1846
1847                 ret = udma_tisci_rx_channel_config(uc);
1848                 break;
1849         default:
1850                 /* Can not happen */
1851                 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1852                         __func__, uc->id, uc->config.dir);
1853                 return -EINVAL;
1854         }
1855
1856         /* check if the channel configuration was successful */
1857         if (ret)
1858                 goto err_res_free;
1859
1860         if (udma_is_chan_running(uc)) {
1861                 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1862                 udma_stop(uc);
1863                 if (udma_is_chan_running(uc)) {
1864                         dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1865                         goto err_res_free;
1866                 }
1867         }
1868
1869         /* PSI-L pairing */
1870         ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1871         if (ret) {
1872                 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1873                         uc->config.src_thread, uc->config.dst_thread);
1874                 goto err_res_free;
1875         }
1876
1877         uc->psil_paired = true;
1878
1879         uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1880         if (uc->irq_num_ring <= 0) {
1881                 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1882                         k3_ringacc_get_ring_id(irq_ring));
1883                 ret = -EINVAL;
1884                 goto err_psi_free;
1885         }
1886
1887         ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1888                           IRQF_TRIGGER_HIGH, uc->name, uc);
1889         if (ret) {
1890                 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1891                 goto err_irq_free;
1892         }
1893
1894         /* Event from UDMA (TR events) only needed for slave TR mode channels */
1895         if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1896                 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1897                                                             irq_udma_idx);
1898                 if (uc->irq_num_udma <= 0) {
1899                         dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1900                                 irq_udma_idx);
1901                         free_irq(uc->irq_num_ring, uc);
1902                         ret = -EINVAL;
1903                         goto err_irq_free;
1904                 }
1905
1906                 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1907                                   uc->name, uc);
1908                 if (ret) {
1909                         dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1910                                 uc->id);
1911                         free_irq(uc->irq_num_ring, uc);
1912                         goto err_irq_free;
1913                 }
1914         } else {
1915                 uc->irq_num_udma = 0;
1916         }
1917
1918         udma_reset_rings(uc);
1919
1920         INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
1921                                   udma_check_tx_completion);
1922         return 0;
1923
1924 err_irq_free:
1925         uc->irq_num_ring = 0;
1926         uc->irq_num_udma = 0;
1927 err_psi_free:
1928         navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1929         uc->psil_paired = false;
1930 err_res_free:
1931         udma_free_tx_resources(uc);
1932         udma_free_rx_resources(uc);
1933
1934         udma_reset_uchan(uc);
1935
1936         if (uc->use_dma_pool) {
1937                 dma_pool_destroy(uc->hdesc_pool);
1938                 uc->use_dma_pool = false;
1939         }
1940
1941         return ret;
1942 }
1943
1944 static int udma_slave_config(struct dma_chan *chan,
1945                              struct dma_slave_config *cfg)
1946 {
1947         struct udma_chan *uc = to_udma_chan(chan);
1948
1949         memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1950
1951         return 0;
1952 }
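/*
 * Client-side usage sketch (illustrative only, not part of this driver):
 * a consumer fills a struct dma_slave_config and passes it through the
 * dmaengine core, which ends up calling udma_slave_config() above.
 * fifo_phys_addr is a hypothetical peripheral FIFO address.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * The stored values are consumed later by udma_prep_slave_sg() and
 * udma_configure_statictr().
 */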
1953
1954 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1955                                             size_t tr_size, int tr_count,
1956                                             enum dma_transfer_direction dir)
1957 {
1958         struct udma_hwdesc *hwdesc;
1959         struct cppi5_desc_hdr_t *tr_desc;
1960         struct udma_desc *d;
1961         u32 reload_count = 0;
1962         u32 ring_id;
1963
1964         switch (tr_size) {
1965         case 16:
1966         case 32:
1967         case 64:
1968         case 128:
1969                 break;
1970         default:
1971                 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1972                 return NULL;
1973         }
1974
1975         /* We have only one descriptor containing multiple TRs */
1976         d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1977         if (!d)
1978                 return NULL;
1979
1980         d->sglen = tr_count;
1981
1982         d->hwdesc_count = 1;
1983         hwdesc = &d->hwdesc[0];
1984
1985         /* Allocate memory for DMA ring descriptor */
1986         if (uc->use_dma_pool) {
1987                 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1988                 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1989                                                 GFP_NOWAIT,
1990                                                 &hwdesc->cppi5_desc_paddr);
1991         } else {
1992                 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1993                                                                  tr_count);
1994                 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1995                                                 uc->ud->desc_align);
1996                 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1997                                                 hwdesc->cppi5_desc_size,
1998                                                 &hwdesc->cppi5_desc_paddr,
1999                                                 GFP_NOWAIT);
2000         }
2001
2002         if (!hwdesc->cppi5_desc_vaddr) {
2003                 kfree(d);
2004                 return NULL;
2005         }
2006
2007         /* Start of the TR req records */
2008         hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2009         /* Start address of the TR response array */
2010         hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2011
2012         tr_desc = hwdesc->cppi5_desc_vaddr;
2013
2014         if (uc->cyclic)
2015                 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2016
2017         if (dir == DMA_DEV_TO_MEM)
2018                 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2019         else
2020                 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2021
2022         cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2023         cppi5_desc_set_pktids(tr_desc, uc->id,
2024                               CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2025         cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2026
2027         return d;
2028 }
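/*
 * Layout of the TR descriptor allocated above, as implied by the pointer
 * arithmetic in udma_alloc_tr_desc() (offsets in bytes):
 *
 *	0				CPPI5 TR descriptor header
 *	tr_size				TR request records (tr_req_base),
 *					tr_count entries of tr_size bytes each
 *	tr_size * (tr_count + 1)	TR response records (tr_resp_base)
 *
 * For example, the two-TR memcpy descriptor (64 byte type15 TRs) has its
 * requests at offset 64 and its responses at offset 192.
 */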
2029
2030 /**
2031  * udma_get_tr_counters - calculate TR counters for a given length
2032  * @len: Length of the transfer
2033  * @align_to: Preferred alignment
2034  * @tr0_cnt0: First TR icnt0
2035  * @tr0_cnt1: First TR icnt1
2036  * @tr1_cnt0: Second (if used) TR icnt0
2037  *
2038  * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2039  * For len >= SZ_64K two TRs are used in a simple way:
2040  * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2041  * Second TR: the remaining length (tr1_cnt0)
2042  *
2043  * Returns the number of TRs the length needs (1 or 2), or
2044  * -EINVAL if the length cannot be supported.
2045  */
2046 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2047                                 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2048 {
2049         if (len < SZ_64K) {
2050                 *tr0_cnt0 = len;
2051                 *tr0_cnt1 = 1;
2052
2053                 return 1;
2054         }
2055
2056         if (align_to > 3)
2057                 align_to = 3;
2058
2059 realign:
2060         *tr0_cnt0 = SZ_64K - BIT(align_to);
2061         if (len / *tr0_cnt0 >= SZ_64K) {
2062                 if (align_to) {
2063                         align_to--;
2064                         goto realign;
2065                 }
2066                 return -EINVAL;
2067         }
2068
2069         *tr0_cnt1 = len / *tr0_cnt0;
2070         *tr1_cnt0 = len % *tr0_cnt0;
2071
2072         return 2;
2073 }
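/*
 * Worked example for udma_get_tr_counters() (illustrative numbers):
 *
 *	len = 300000, align_to = 2 (4 byte aligned address):
 *		tr0_cnt0 = SZ_64K - BIT(2) = 65532
 *		tr0_cnt1 = 300000 / 65532  = 4
 *		tr1_cnt0 = 300000 % 65532  = 37872
 *	-> two TRs: 4 blocks of 65532 bytes plus one 37872 byte block
 *
 *	len = 4096 (< SZ_64K):
 *		tr0_cnt0 = 4096, tr0_cnt1 = 1
 *	-> a single TR is enough
 */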
2074
2075 static struct udma_desc *
2076 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2077                       unsigned int sglen, enum dma_transfer_direction dir,
2078                       unsigned long tx_flags, void *context)
2079 {
2080         struct scatterlist *sgent;
2081         struct udma_desc *d;
2082         struct cppi5_tr_type1_t *tr_req = NULL;
2083         u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2084         unsigned int i;
2085         size_t tr_size;
2086         int num_tr = 0;
2087         int tr_idx = 0;
2088
2089         if (!is_slave_direction(dir)) {
2090                 dev_err(uc->ud->dev, "Only slave DMA is supported\n");
2091                 return NULL;
2092         }
2093
2094         /* estimate the number of TRs we will need */
2095         for_each_sg(sgl, sgent, sglen, i) {
2096                 if (sg_dma_len(sgent) < SZ_64K)
2097                         num_tr++;
2098                 else
2099                         num_tr += 2;
2100         }
2101
2102         /* Now allocate and setup the descriptor. */
2103         tr_size = sizeof(struct cppi5_tr_type1_t);
2104         d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2105         if (!d)
2106                 return NULL;
2107
2108         d->sglen = sglen;
2109
2110         tr_req = d->hwdesc[0].tr_req_base;
2111         for_each_sg(sgl, sgent, sglen, i) {
2112                 dma_addr_t sg_addr = sg_dma_address(sgent);
2113
2114                 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2115                                               &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2116                 if (num_tr < 0) {
2117                         dev_err(uc->ud->dev, "size %u is not supported\n",
2118                                 sg_dma_len(sgent));
2119                         udma_free_hwdesc(uc, d);
2120                         kfree(d);
2121                         return NULL;
2122                 }
2123
2124                 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2125                               false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2126                 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2127
2128                 tr_req[tr_idx].addr = sg_addr;
2129                 tr_req[tr_idx].icnt0 = tr0_cnt0;
2130                 tr_req[tr_idx].icnt1 = tr0_cnt1;
2131                 tr_req[tr_idx].dim1 = tr0_cnt0;
2132                 tr_idx++;
2133
2134                 if (num_tr == 2) {
2135                         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2136                                       false, false,
2137                                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2138                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2139                                          CPPI5_TR_CSF_SUPR_EVT);
2140
2141                         tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2142                         tr_req[tr_idx].icnt0 = tr1_cnt0;
2143                         tr_req[tr_idx].icnt1 = 1;
2144                         tr_req[tr_idx].dim1 = tr1_cnt0;
2145                         tr_idx++;
2146                 }
2147
2148                 d->residue += sg_dma_len(sgent);
2149         }
2150
2151         cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
2152
2153         return d;
2154 }
2155
2156 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2157                                    enum dma_slave_buswidth dev_width,
2158                                    u16 elcnt)
2159 {
2160         if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2161                 return 0;
2162
2163         /* Bus width translates to the element size (ES) */
2164         switch (dev_width) {
2165         case DMA_SLAVE_BUSWIDTH_1_BYTE:
2166                 d->static_tr.elsize = 0;
2167                 break;
2168         case DMA_SLAVE_BUSWIDTH_2_BYTES:
2169                 d->static_tr.elsize = 1;
2170                 break;
2171         case DMA_SLAVE_BUSWIDTH_3_BYTES:
2172                 d->static_tr.elsize = 2;
2173                 break;
2174         case DMA_SLAVE_BUSWIDTH_4_BYTES:
2175                 d->static_tr.elsize = 3;
2176                 break;
2177         case DMA_SLAVE_BUSWIDTH_8_BYTES:
2178                 d->static_tr.elsize = 4;
2179                 break;
2180         default: /* not reached */
2181                 return -EINVAL;
2182         }
2183
2184         d->static_tr.elcnt = elcnt;
2185
2186         /*
2187          * PDMA must close the packet when the channel is in packet mode.
2188          * For TR mode, when the channel is not cyclic we also need PDMA to close
2189          * the packet, otherwise the transfer will stall because PDMA holds on to
2190          * the data it has received from the peripheral.
2191          */
2192         if (uc->config.pkt_mode || !uc->cyclic) {
2193                 unsigned int div = dev_width * elcnt;
2194
2195                 if (uc->cyclic)
2196                         d->static_tr.bstcnt = d->residue / d->sglen / div;
2197                 else
2198                         d->static_tr.bstcnt = d->residue / div;
2199
2200                 if (uc->config.dir == DMA_DEV_TO_MEM &&
2201                     d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2202                         return -EINVAL;
2203         } else {
2204                 d->static_tr.bstcnt = 0;
2205         }
2206
2207         return 0;
2208 }
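/*
 * Static TR example (illustrative): a non-cyclic DMA_DEV_TO_MEM transfer
 * with dev_width = DMA_SLAVE_BUSWIDTH_4_BYTES and elcnt = 8 gives
 * div = 4 * 8 = 32 bytes, so a 4096 byte transfer results in
 * bstcnt = 4096 / 32 = 128, which must fit within the SoC specific
 * statictr_z_mask limit checked above.
 */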
2209
2210 static struct udma_desc *
2211 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2212                        unsigned int sglen, enum dma_transfer_direction dir,
2213                        unsigned long tx_flags, void *context)
2214 {
2215         struct scatterlist *sgent;
2216         struct cppi5_host_desc_t *h_desc = NULL;
2217         struct udma_desc *d;
2218         u32 ring_id;
2219         unsigned int i;
2220
2221         d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2222         if (!d)
2223                 return NULL;
2224
2225         d->sglen = sglen;
2226         d->hwdesc_count = sglen;
2227
2228         if (dir == DMA_DEV_TO_MEM)
2229                 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2230         else
2231                 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2232
2233         for_each_sg(sgl, sgent, sglen, i) {
2234                 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2235                 dma_addr_t sg_addr = sg_dma_address(sgent);
2236                 struct cppi5_host_desc_t *desc;
2237                 size_t sg_len = sg_dma_len(sgent);
2238
2239                 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2240                                                 GFP_NOWAIT,
2241                                                 &hwdesc->cppi5_desc_paddr);
2242                 if (!hwdesc->cppi5_desc_vaddr) {
2243                         dev_err(uc->ud->dev,
2244                                 "descriptor%d allocation failed\n", i);
2245
2246                         udma_free_hwdesc(uc, d);
2247                         kfree(d);
2248                         return NULL;
2249                 }
2250
2251                 d->residue += sg_len;
2252                 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2253                 desc = hwdesc->cppi5_desc_vaddr;
2254
2255                 if (i == 0) {
2256                         cppi5_hdesc_init(desc, 0, 0);
2257                         /* Flow and Packet ID */
2258                         cppi5_desc_set_pktids(&desc->hdr, uc->id,
2259                                               CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2260                         cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2261                 } else {
2262                         cppi5_hdesc_reset_hbdesc(desc);
2263                         cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2264                 }
2265
2266                 /* attach the sg buffer to the descriptor */
2267                 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2268
2269                 /* Attach link as host buffer descriptor */
2270                 if (h_desc)
2271                         cppi5_hdesc_link_hbdesc(h_desc,
2272                                                 hwdesc->cppi5_desc_paddr);
2273
2274                 if (dir == DMA_MEM_TO_DEV)
2275                         h_desc = desc;
2276         }
2277
2278         if (d->residue >= SZ_4M) {
2279                 dev_err(uc->ud->dev,
2280                         "%s: Transfer size %u is over the supported 4M range\n",
2281                         __func__, d->residue);
2282                 udma_free_hwdesc(uc, d);
2283                 kfree(d);
2284                 return NULL;
2285         }
2286
2287         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2288         cppi5_hdesc_set_pktlen(h_desc, d->residue);
2289
2290         return d;
2291 }
2292
2293 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2294                                 void *data, size_t len)
2295 {
2296         struct udma_desc *d = to_udma_desc(desc);
2297         struct udma_chan *uc = to_udma_chan(desc->chan);
2298         struct cppi5_host_desc_t *h_desc;
2299         u32 psd_size = len;
2300         u32 flags = 0;
2301
2302         if (!uc->config.pkt_mode || !uc->config.metadata_size)
2303                 return -ENOTSUPP;
2304
2305         if (!data || len > uc->config.metadata_size)
2306                 return -EINVAL;
2307
2308         if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2309                 return -EINVAL;
2310
2311         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2312         if (d->dir == DMA_MEM_TO_DEV)
2313                 memcpy(h_desc->epib, data, len);
2314
2315         if (uc->config.needs_epib)
2316                 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2317
2318         d->metadata = data;
2319         d->metadata_size = len;
2320         if (uc->config.needs_epib)
2321                 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2322
2323         cppi5_hdesc_update_flags(h_desc, flags);
2324         cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2325
2326         return 0;
2327 }
2328
2329 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2330                                    size_t *payload_len, size_t *max_len)
2331 {
2332         struct udma_desc *d = to_udma_desc(desc);
2333         struct udma_chan *uc = to_udma_chan(desc->chan);
2334         struct cppi5_host_desc_t *h_desc;
2335
2336         if (!uc->config.pkt_mode || !uc->config.metadata_size)
2337                 return ERR_PTR(-ENOTSUPP);
2338
2339         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2340
2341         *max_len = uc->config.metadata_size;
2342
2343         *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2344                        CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2345         *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2346
2347         return h_desc->epib;
2348 }
2349
2350 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2351                                  size_t payload_len)
2352 {
2353         struct udma_desc *d = to_udma_desc(desc);
2354         struct udma_chan *uc = to_udma_chan(desc->chan);
2355         struct cppi5_host_desc_t *h_desc;
2356         u32 psd_size = payload_len;
2357         u32 flags = 0;
2358
2359         if (!uc->config.pkt_mode || !uc->config.metadata_size)
2360                 return -ENOTSUPP;
2361
2362         if (payload_len > uc->config.metadata_size)
2363                 return -EINVAL;
2364
2365         if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2366                 return -EINVAL;
2367
2368         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2369
2370         if (uc->config.needs_epib) {
2371                 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2372                 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2373         }
2374
2375         cppi5_hdesc_update_flags(h_desc, flags);
2376         cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2377
2378         return 0;
2379 }
2380
2381 static struct dma_descriptor_metadata_ops metadata_ops = {
2382         .attach = udma_attach_metadata,
2383         .get_ptr = udma_get_metadata_ptr,
2384         .set_len = udma_set_metadata_len,
2385 };
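/*
 * These callbacks are not called by clients directly; they are reached via
 * the dmaengine core metadata helpers once a prepared descriptor has its
 * metadata_ops set. Client-side sketch (illustrative):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV, flags);
 *	dmaengine_desc_attach_metadata(desc, meta_buf, meta_len);
 *	dmaengine_submit(desc);
 */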
2386
2387 static struct dma_async_tx_descriptor *
2388 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2389                    unsigned int sglen, enum dma_transfer_direction dir,
2390                    unsigned long tx_flags, void *context)
2391 {
2392         struct udma_chan *uc = to_udma_chan(chan);
2393         enum dma_slave_buswidth dev_width;
2394         struct udma_desc *d;
2395         u32 burst;
2396
2397         if (dir != uc->config.dir) {
2398                 dev_err(chan->device->dev,
2399                         "%s: chan%d is for %s, not supporting %s\n",
2400                         __func__, uc->id,
2401                         dmaengine_get_direction_text(uc->config.dir),
2402                         dmaengine_get_direction_text(dir));
2403                 return NULL;
2404         }
2405
2406         if (dir == DMA_DEV_TO_MEM) {
2407                 dev_width = uc->cfg.src_addr_width;
2408                 burst = uc->cfg.src_maxburst;
2409         } else if (dir == DMA_MEM_TO_DEV) {
2410                 dev_width = uc->cfg.dst_addr_width;
2411                 burst = uc->cfg.dst_maxburst;
2412         } else {
2413                 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2414                 return NULL;
2415         }
2416
2417         if (!burst)
2418                 burst = 1;
2419
2420         if (uc->config.pkt_mode)
2421                 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2422                                            context);
2423         else
2424                 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2425                                           context);
2426
2427         if (!d)
2428                 return NULL;
2429
2430         d->dir = dir;
2431         d->desc_idx = 0;
2432         d->tr_idx = 0;
2433
2434         /* static TR for remote PDMA */
2435         if (udma_configure_statictr(uc, d, dev_width, burst)) {
2436                 dev_err(uc->ud->dev,
2437                         "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2438                         __func__, d->static_tr.bstcnt);
2439
2440                 udma_free_hwdesc(uc, d);
2441                 kfree(d);
2442                 return NULL;
2443         }
2444
2445         if (uc->config.metadata_size)
2446                 d->vd.tx.metadata_ops = &metadata_ops;
2447
2448         return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2449 }
2450
2451 static struct udma_desc *
2452 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2453                         size_t buf_len, size_t period_len,
2454                         enum dma_transfer_direction dir, unsigned long flags)
2455 {
2456         struct udma_desc *d;
2457         size_t tr_size, period_addr;
2458         struct cppi5_tr_type1_t *tr_req;
2459         unsigned int periods = buf_len / period_len;
2460         u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2461         unsigned int i;
2462         int num_tr;
2463
2464         if (!is_slave_direction(dir)) {
2465                 dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
2466                 return NULL;
2467         }
2468
2469         num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
2470                                       &tr0_cnt1, &tr1_cnt0);
2471         if (num_tr < 0) {
2472                 dev_err(uc->ud->dev, "size %zu is not supported\n",
2473                         period_len);
2474                 return NULL;
2475         }
2476
2477         /* Now allocate and setup the descriptor. */
2478         tr_size = sizeof(struct cppi5_tr_type1_t);
2479         d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
2480         if (!d)
2481                 return NULL;
2482
2483         tr_req = d->hwdesc[0].tr_req_base;
2484         period_addr = buf_addr;
2485         for (i = 0; i < periods; i++) {
2486                 int tr_idx = i * num_tr;
2487
2488                 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2489                               false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2490
2491                 tr_req[tr_idx].addr = period_addr;
2492                 tr_req[tr_idx].icnt0 = tr0_cnt0;
2493                 tr_req[tr_idx].icnt1 = tr0_cnt1;
2494                 tr_req[tr_idx].dim1 = tr0_cnt0;
2495
2496                 if (num_tr == 2) {
2497                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2498                                          CPPI5_TR_CSF_SUPR_EVT);
2499                         tr_idx++;
2500
2501                         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2502                                       false, false,
2503                                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2504
2505                         tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
2506                         tr_req[tr_idx].icnt0 = tr1_cnt0;
2507                         tr_req[tr_idx].icnt1 = 1;
2508                         tr_req[tr_idx].dim1 = tr1_cnt0;
2509                 }
2510
2511                 if (!(flags & DMA_PREP_INTERRUPT))
2512                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2513                                          CPPI5_TR_CSF_SUPR_EVT);
2514
2515                 period_addr += period_len;
2516         }
2517
2518         return d;
2519 }
2520
2521 static struct udma_desc *
2522 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2523                          size_t buf_len, size_t period_len,
2524                          enum dma_transfer_direction dir, unsigned long flags)
2525 {
2526         struct udma_desc *d;
2527         u32 ring_id;
2528         int i;
2529         int periods = buf_len / period_len;
2530
2531         if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2532                 return NULL;
2533
2534         if (period_len >= SZ_4M)
2535                 return NULL;
2536
2537         d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
2538         if (!d)
2539                 return NULL;
2540
2541         d->hwdesc_count = periods;
2542
2543         /* TODO: re-check this... */
2544         if (dir == DMA_DEV_TO_MEM)
2545                 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2546         else
2547                 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2548
2549         for (i = 0; i < periods; i++) {
2550                 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2551                 dma_addr_t period_addr = buf_addr + (period_len * i);
2552                 struct cppi5_host_desc_t *h_desc;
2553
2554                 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2555                                                 GFP_NOWAIT,
2556                                                 &hwdesc->cppi5_desc_paddr);
2557                 if (!hwdesc->cppi5_desc_vaddr) {
2558                         dev_err(uc->ud->dev,
2559                                 "descriptor%d allocation failed\n", i);
2560
2561                         udma_free_hwdesc(uc, d);
2562                         kfree(d);
2563                         return NULL;
2564                 }
2565
2566                 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2567                 h_desc = hwdesc->cppi5_desc_vaddr;
2568
2569                 cppi5_hdesc_init(h_desc, 0, 0);
2570                 cppi5_hdesc_set_pktlen(h_desc, period_len);
2571
2572                 /* Flow and Packet ID */
2573                 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
2574                                       CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2575                 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2576
2577                 /* attach each period to a new descriptor */
2578                 cppi5_hdesc_attach_buf(h_desc,
2579                                        period_addr, period_len,
2580                                        period_addr, period_len);
2581         }
2582
2583         return d;
2584 }
2585
2586 static struct dma_async_tx_descriptor *
2587 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2588                      size_t period_len, enum dma_transfer_direction dir,
2589                      unsigned long flags)
2590 {
2591         struct udma_chan *uc = to_udma_chan(chan);
2592         enum dma_slave_buswidth dev_width;
2593         struct udma_desc *d;
2594         u32 burst;
2595
2596         if (dir != uc->config.dir) {
2597                 dev_err(chan->device->dev,
2598                         "%s: chan%d is for %s, not supporting %s\n",
2599                         __func__, uc->id,
2600                         dmaengine_get_direction_text(uc->config.dir),
2601                         dmaengine_get_direction_text(dir));
2602                 return NULL;
2603         }
2604
2605         uc->cyclic = true;
2606
2607         if (dir == DMA_DEV_TO_MEM) {
2608                 dev_width = uc->cfg.src_addr_width;
2609                 burst = uc->cfg.src_maxburst;
2610         } else if (dir == DMA_MEM_TO_DEV) {
2611                 dev_width = uc->cfg.dst_addr_width;
2612                 burst = uc->cfg.dst_maxburst;
2613         } else {
2614                 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2615                 return NULL;
2616         }
2617
2618         if (!burst)
2619                 burst = 1;
2620
2621         if (uc->config.pkt_mode)
2622                 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2623                                              dir, flags);
2624         else
2625                 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2626                                             dir, flags);
2627
2628         if (!d)
2629                 return NULL;
2630
2631         d->sglen = buf_len / period_len;
2632
2633         d->dir = dir;
2634         d->residue = buf_len;
2635
2636         /* static TR for remote PDMA */
2637         if (udma_configure_statictr(uc, d, dev_width, burst)) {
2638                 dev_err(uc->ud->dev,
2639                         "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2640                         __func__, d->static_tr.bstcnt);
2641
2642                 udma_free_hwdesc(uc, d);
2643                 kfree(d);
2644                 return NULL;
2645         }
2646
2647         if (uc->config.metadata_size)
2648                 d->vd.tx.metadata_ops = &metadata_ops;
2649
2650         return vchan_tx_prep(&uc->vc, &d->vd, flags);
2651 }
2652
2653 static struct dma_async_tx_descriptor *
2654 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2655                      size_t len, unsigned long tx_flags)
2656 {
2657         struct udma_chan *uc = to_udma_chan(chan);
2658         struct udma_desc *d;
2659         struct cppi5_tr_type15_t *tr_req;
2660         int num_tr;
2661         size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2662         u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2663
2664         if (uc->config.dir != DMA_MEM_TO_MEM) {
2665                 dev_err(chan->device->dev,
2666                         "%s: chan%d is for %s, not supporting %s\n",
2667                         __func__, uc->id,
2668                         dmaengine_get_direction_text(uc->config.dir),
2669                         dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2670                 return NULL;
2671         }
2672
2673         num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
2674                                       &tr0_cnt1, &tr1_cnt0);
2675         if (num_tr < 0) {
2676                 dev_err(uc->ud->dev, "size %zu is not supported\n",
2677                         len);
2678                 return NULL;
2679         }
2680
2681         d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2682         if (!d)
2683                 return NULL;
2684
2685         d->dir = DMA_MEM_TO_MEM;
2686         d->desc_idx = 0;
2687         d->tr_idx = 0;
2688         d->residue = len;
2689
2690         tr_req = d->hwdesc[0].tr_req_base;
2691
2692         cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2693                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2694         cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2695
2696         tr_req[0].addr = src;
2697         tr_req[0].icnt0 = tr0_cnt0;
2698         tr_req[0].icnt1 = tr0_cnt1;
2699         tr_req[0].icnt2 = 1;
2700         tr_req[0].icnt3 = 1;
2701         tr_req[0].dim1 = tr0_cnt0;
2702
2703         tr_req[0].daddr = dest;
2704         tr_req[0].dicnt0 = tr0_cnt0;
2705         tr_req[0].dicnt1 = tr0_cnt1;
2706         tr_req[0].dicnt2 = 1;
2707         tr_req[0].dicnt3 = 1;
2708         tr_req[0].ddim1 = tr0_cnt0;
2709
2710         if (num_tr == 2) {
2711                 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2712                               CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2713                 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2714
2715                 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2716                 tr_req[1].icnt0 = tr1_cnt0;
2717                 tr_req[1].icnt1 = 1;
2718                 tr_req[1].icnt2 = 1;
2719                 tr_req[1].icnt3 = 1;
2720
2721                 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2722                 tr_req[1].dicnt0 = tr1_cnt0;
2723                 tr_req[1].dicnt1 = 1;
2724                 tr_req[1].dicnt2 = 1;
2725                 tr_req[1].dicnt3 = 1;
2726         }
2727
2728         cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
2729
2730         if (uc->config.metadata_size)
2731                 d->vd.tx.metadata_ops = &metadata_ops;
2732
2733         return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2734 }
2735
2736 static void udma_issue_pending(struct dma_chan *chan)
2737 {
2738         struct udma_chan *uc = to_udma_chan(chan);
2739         unsigned long flags;
2740
2741         spin_lock_irqsave(&uc->vc.lock, flags);
2742
2743         /* If we have something pending and no active descriptor, then */
2744         if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2745                 /*
2746                  * start a descriptor if the channel is NOT [marked as
2747                  * terminating _and_ it is still running (teardown has not
2748                  * completed yet)].
2749                  */
2750                 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2751                       udma_is_chan_running(uc)))
2752                         udma_start(uc);
2753         }
2754
2755         spin_unlock_irqrestore(&uc->vc.lock, flags);
2756 }
2757
2758 static enum dma_status udma_tx_status(struct dma_chan *chan,
2759                                       dma_cookie_t cookie,
2760                                       struct dma_tx_state *txstate)
2761 {
2762         struct udma_chan *uc = to_udma_chan(chan);
2763         enum dma_status ret;
2764         unsigned long flags;
2765
2766         spin_lock_irqsave(&uc->vc.lock, flags);
2767
2768         ret = dma_cookie_status(chan, cookie, txstate);
2769
2770         if (!udma_is_chan_running(uc))
2771                 ret = DMA_COMPLETE;
2772
2773         if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2774                 ret = DMA_PAUSED;
2775
2776         if (ret == DMA_COMPLETE || !txstate)
2777                 goto out;
2778
2779         if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2780                 u32 peer_bcnt = 0;
2781                 u32 bcnt = 0;
2782                 u32 residue = uc->desc->residue;
2783                 u32 delay = 0;
2784
2785                 if (uc->desc->dir == DMA_MEM_TO_DEV) {
2786                         bcnt = udma_tchanrt_read(uc->tchan,
2787                                                  UDMA_TCHAN_RT_SBCNT_REG);
2788
2789                         if (uc->config.ep_type != PSIL_EP_NATIVE) {
2790                                 peer_bcnt = udma_tchanrt_read(uc->tchan,
2791                                                 UDMA_TCHAN_RT_PEER_BCNT_REG);
2792
2793                                 if (bcnt > peer_bcnt)
2794                                         delay = bcnt - peer_bcnt;
2795                         }
2796                 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
2797                         bcnt = udma_rchanrt_read(uc->rchan,
2798                                                  UDMA_RCHAN_RT_BCNT_REG);
2799
2800                         if (uc->config.ep_type != PSIL_EP_NATIVE) {
2801                                 peer_bcnt = udma_rchanrt_read(uc->rchan,
2802                                                 UDMA_RCHAN_RT_PEER_BCNT_REG);
2803
2804                                 if (peer_bcnt > bcnt)
2805                                         delay = peer_bcnt - bcnt;
2806                         }
2807                 } else {
2808                         bcnt = udma_tchanrt_read(uc->tchan,
2809                                                  UDMA_TCHAN_RT_BCNT_REG);
2810                 }
2811
2812                 bcnt -= uc->bcnt;
2813                 if (bcnt && !(bcnt % uc->desc->residue))
2814                         residue = 0;
2815                 else
2816                         residue -= bcnt % uc->desc->residue;
2817
2818                 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2819                         ret = DMA_COMPLETE;
2820                         delay = 0;
2821                 }
2822
2823                 dma_set_residue(txstate, residue);
2824                 dma_set_in_flight_bytes(txstate, delay);
2825
2826         } else {
2827                 ret = DMA_COMPLETE;
2828         }
2829
2830 out:
2831         spin_unlock_irqrestore(&uc->vc.lock, flags);
2832         return ret;
2833 }
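/*
 * Residue reporting example for udma_tx_status() (illustrative): for a
 * descriptor with residue 4096 and a channel byte counter showing
 * bcnt = 1024 bytes progressed for this descriptor, the reported residue is
 * 4096 - (1024 % 4096) = 3072. Once bcnt is a non-zero multiple of the
 * descriptor residue, residue 0 is reported.
 */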
2834
2835 static int udma_pause(struct dma_chan *chan)
2836 {
2837         struct udma_chan *uc = to_udma_chan(chan);
2838
2839         /* pause the channel */
2840         switch (uc->config.dir) {
2841         case DMA_DEV_TO_MEM:
2842                 udma_rchanrt_update_bits(uc->rchan,
2843                                          UDMA_RCHAN_RT_PEER_RT_EN_REG,
2844                                          UDMA_PEER_RT_EN_PAUSE,
2845                                          UDMA_PEER_RT_EN_PAUSE);
2846                 break;
2847         case DMA_MEM_TO_DEV:
2848                 udma_tchanrt_update_bits(uc->tchan,
2849                                          UDMA_TCHAN_RT_PEER_RT_EN_REG,
2850                                          UDMA_PEER_RT_EN_PAUSE,
2851                                          UDMA_PEER_RT_EN_PAUSE);
2852                 break;
2853         case DMA_MEM_TO_MEM:
2854                 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2855                                          UDMA_CHAN_RT_CTL_PAUSE,
2856                                          UDMA_CHAN_RT_CTL_PAUSE);
2857                 break;
2858         default:
2859                 return -EINVAL;
2860         }
2861
2862         return 0;
2863 }
2864
2865 static int udma_resume(struct dma_chan *chan)
2866 {
2867         struct udma_chan *uc = to_udma_chan(chan);
2868
2869         /* resume the channel */
2870         switch (uc->config.dir) {
2871         case DMA_DEV_TO_MEM:
2872                 udma_rchanrt_update_bits(uc->rchan,
2873                                          UDMA_RCHAN_RT_PEER_RT_EN_REG,
2874                                          UDMA_PEER_RT_EN_PAUSE, 0);
2876                 break;
2877         case DMA_MEM_TO_DEV:
2878                 udma_tchanrt_update_bits(uc->tchan,
2879                                          UDMA_TCHAN_RT_PEER_RT_EN_REG,
2880                                          UDMA_PEER_RT_EN_PAUSE, 0);
2881                 break;
2882         case DMA_MEM_TO_MEM:
2883                 udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
2884                                          UDMA_CHAN_RT_CTL_PAUSE, 0);
2885                 break;
2886         default:
2887                 return -EINVAL;
2888         }
2889
2890         return 0;
2891 }
2892
2893 static int udma_terminate_all(struct dma_chan *chan)
2894 {
2895         struct udma_chan *uc = to_udma_chan(chan);
2896         unsigned long flags;
2897         LIST_HEAD(head);
2898
2899         spin_lock_irqsave(&uc->vc.lock, flags);
2900
2901         if (udma_is_chan_running(uc))
2902                 udma_stop(uc);
2903
2904         if (uc->desc) {
2905                 uc->terminated_desc = uc->desc;
2906                 uc->desc = NULL;
2907                 uc->terminated_desc->terminated = true;
2908                 cancel_delayed_work(&uc->tx_drain.work);
2909         }
2910
2911         uc->paused = false;
2912
2913         vchan_get_all_descriptors(&uc->vc, &head);
2914         spin_unlock_irqrestore(&uc->vc.lock, flags);
2915         vchan_dma_desc_free_list(&uc->vc, &head);
2916
2917         return 0;
2918 }
2919
2920 static void udma_synchronize(struct dma_chan *chan)
2921 {
2922         struct udma_chan *uc = to_udma_chan(chan);
2923         unsigned long timeout = msecs_to_jiffies(1000);
2924
2925         vchan_synchronize(&uc->vc);
2926
2927         if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2928                 timeout = wait_for_completion_timeout(&uc->teardown_completed,
2929                                                       timeout);
2930                 if (!timeout) {
2931                         dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2932                                  uc->id);
2933                         udma_dump_chan_stdata(uc);
2934                         udma_reset_chan(uc, true);
2935                 }
2936         }
2937
2938         udma_reset_chan(uc, false);
2939         if (udma_is_chan_running(uc))
2940                 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2941
2942         cancel_delayed_work_sync(&uc->tx_drain.work);
2943         udma_reset_rings(uc);
2944 }
2945
2946 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2947                                    struct virt_dma_desc *vd,
2948                                    struct dmaengine_result *result)
2949 {
2950         struct udma_chan *uc = to_udma_chan(&vc->chan);
2951         struct udma_desc *d;
2952
2953         if (!vd)
2954                 return;
2955
2956         d = to_udma_desc(&vd->tx);
2957
2958         if (d->metadata_size)
2959                 udma_fetch_epib(uc, d);
2960
2961         /* Provide residue information for the client */
2962         if (result) {
2963                 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2964
2965                 if (cppi5_desc_get_type(desc_vaddr) ==
2966                     CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2967                         result->residue = d->residue -
2968                                           cppi5_hdesc_get_pktlen(desc_vaddr);
2969                         if (result->residue)
2970                                 result->result = DMA_TRANS_ABORTED;
2971                         else
2972                                 result->result = DMA_TRANS_NOERROR;
2973                 } else {
2974                         result->residue = 0;
2975                         result->result = DMA_TRANS_NOERROR;
2976                 }
2977         }
2978 }
2979
2980 /*
2981  * This tasklet handles the completion of a DMA descriptor by
2982  * calling its callback and freeing it.
2983  */
2984 static void udma_vchan_complete(unsigned long arg)
2985 {
2986         struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
2987         struct virt_dma_desc *vd, *_vd;
2988         struct dmaengine_desc_callback cb;
2989         LIST_HEAD(head);
2990
2991         spin_lock_irq(&vc->lock);
2992         list_splice_tail_init(&vc->desc_completed, &head);
2993         vd = vc->cyclic;
2994         if (vd) {
2995                 vc->cyclic = NULL;
2996                 dmaengine_desc_get_callback(&vd->tx, &cb);
2997         } else {
2998                 memset(&cb, 0, sizeof(cb));
2999         }
3000         spin_unlock_irq(&vc->lock);
3001
3002         udma_desc_pre_callback(vc, vd, NULL);
3003         dmaengine_desc_callback_invoke(&cb, NULL);
3004
3005         list_for_each_entry_safe(vd, _vd, &head, node) {
3006                 struct dmaengine_result result;
3007
3008                 dmaengine_desc_get_callback(&vd->tx, &cb);
3009
3010                 list_del(&vd->node);
3011
3012                 udma_desc_pre_callback(vc, vd, &result);
3013                 dmaengine_desc_callback_invoke(&cb, &result);
3014
3015                 vchan_vdesc_fini(vd);
3016         }
3017 }
3018
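/*
 * dmaengine device_free_chan_resources callback: terminate any pending
 * transfer, free the ring and UDMA interrupts, release the PSI-L
 * pairing and the channel/ring resources, and destroy the hdesc pool if
 * the channel used one.
 */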
3019 static void udma_free_chan_resources(struct dma_chan *chan)
3020 {
3021         struct udma_chan *uc = to_udma_chan(chan);
3022         struct udma_dev *ud = to_udma_dev(chan->device);
3023
3024         udma_terminate_all(chan);
3025         if (uc->terminated_desc) {
3026                 udma_reset_chan(uc, false);
3027                 udma_reset_rings(uc);
3028         }
3029
3030         cancel_delayed_work_sync(&uc->tx_drain.work);
3031         destroy_delayed_work_on_stack(&uc->tx_drain.work);
3032
3033         if (uc->irq_num_ring > 0) {
3034                 free_irq(uc->irq_num_ring, uc);
3035
3036                 uc->irq_num_ring = 0;
3037         }
3038         if (uc->irq_num_udma > 0) {
3039                 free_irq(uc->irq_num_udma, uc);
3040
3041                 uc->irq_num_udma = 0;
3042         }
3043
3044         /* Release PSI-L pairing */
3045         if (uc->psil_paired) {
3046                 navss_psil_unpair(ud, uc->config.src_thread,
3047                                   uc->config.dst_thread);
3048                 uc->psil_paired = false;
3049         }
3050
3051         vchan_free_chan_resources(&uc->vc);
3052         tasklet_kill(&uc->vc.task);
3053
3054         udma_free_tx_resources(uc);
3055         udma_free_rx_resources(uc);
3056         udma_reset_uchan(uc);
3057
3058         if (uc->use_dma_pool) {
3059                 dma_pool_destroy(uc->hdesc_pool);
3060                 uc->use_dma_pool = false;
3061         }
3062 }
3063
3064 static struct platform_driver udma_driver;
3065
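/*
 * Filter function used by udma_of_xlate(): the single dma_spec argument
 * is the remote PSI-L thread ID. Look up the endpoint configuration for
 * that thread and derive the channel direction, packet mode and
 * metadata sizes from it.
 */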
3066 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
3067 {
3068         struct udma_chan_config *ucc;
3069         struct psil_endpoint_config *ep_config;
3070         struct udma_chan *uc;
3071         struct udma_dev *ud;
3072         u32 *args;
3073
3074         if (chan->device->dev->driver != &udma_driver.driver)
3075                 return false;
3076
3077         uc = to_udma_chan(chan);
3078         ucc = &uc->config;
3079         ud = uc->ud;
3080         args = param;
3081
3082         ucc->remote_thread_id = args[0];
3083
3084         if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
3085                 ucc->dir = DMA_MEM_TO_DEV;
3086         else
3087                 ucc->dir = DMA_DEV_TO_MEM;
3088
3089         ep_config = psil_get_ep_config(ucc->remote_thread_id);
3090         if (IS_ERR(ep_config)) {
3091                 dev_err(ud->dev, "No configuration for PSI-L thread 0x%04x\n",
3092                         ucc->remote_thread_id);
3093                 ucc->dir = DMA_MEM_TO_MEM;
3094                 ucc->remote_thread_id = -1;
3095                 return false;
3096         }
3097
3098         ucc->pkt_mode = ep_config->pkt_mode;
3099         ucc->channel_tpl = ep_config->channel_tpl;
3100         ucc->notdpkt = ep_config->notdpkt;
3101         ucc->ep_type = ep_config->ep_type;
3102
3103         if (ucc->ep_type != PSIL_EP_NATIVE) {
3104                 const struct udma_match_data *match_data = ud->match_data;
3105
3106                 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
3107                         ucc->enable_acc32 = ep_config->pdma_acc32;
3108                 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
3109                         ucc->enable_burst = ep_config->pdma_burst;
3110         }
3111
3112         ucc->needs_epib = ep_config->needs_epib;
3113         ucc->psd_size = ep_config->psd_size;
3114         ucc->metadata_size =
3115                         (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3116                         ucc->psd_size;
3117
3118         if (ucc->pkt_mode)
3119                 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3120                                  ucc->metadata_size, ud->desc_align);
3121
3122         dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3123                 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3124
3125         return true;
3126 }
3127
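/*
 * OF translation: the single dma_spec cell carries the remote PSI-L
 * thread ID, which is passed to udma_dma_filter_fn() through
 * __dma_request_channel().
 */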
3128 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3129                                       struct of_dma *ofdma)
3130 {
3131         struct udma_dev *ud = ofdma->of_dma_data;
3132         dma_cap_mask_t mask = ud->ddev.cap_mask;
3133         struct dma_chan *chan;
3134
3135         if (dma_spec->args_count != 1)
3136                 return NULL;
3137
3138         chan = __dma_request_channel(&mask, udma_dma_filter_fn,
3139                                      &dma_spec->args[0], ofdma->of_node);
3140         if (!chan) {
3141                 dev_err(ud->dev, "failed to get channel in %s\n", __func__);
3142                 return ERR_PTR(-EINVAL);
3143         }
3144
3145         return chan;
3146 }
3147
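/*
 * Per-instance match data: PSI-L thread base, memcpy support, PDMA
 * quirk flags and the throughput-level layout of the channels for each
 * NAVSS UDMAP instance.
 */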
3148 static struct udma_match_data am654_main_data = {
3149         .psil_base = 0x1000,
3150         .enable_memcpy_support = true,
3151         .statictr_z_mask = GENMASK(11, 0),
3152         .rchan_oes_offset = 0x2000,
3153         .tpl_levels = 2,
3154         .level_start_idx = {
3155                 [0] = 8, /* Normal channels */
3156                 [1] = 0, /* High Throughput channels */
3157         },
3158 };
3159
3160 static struct udma_match_data am654_mcu_data = {
3161         .psil_base = 0x6000,
3162         .enable_memcpy_support = true, /* TEST: DMA domains */
3163         .statictr_z_mask = GENMASK(11, 0),
3164         .rchan_oes_offset = 0x2000,
3165         .tpl_levels = 2,
3166         .level_start_idx = {
3167                 [0] = 2, /* Normal channels */
3168                 [1] = 0, /* High Throughput channels */
3169         },
3170 };
3171
3172 static struct udma_match_data j721e_main_data = {
3173         .psil_base = 0x1000,
3174         .enable_memcpy_support = true,
3175         .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3176         .statictr_z_mask = GENMASK(23, 0),
3177         .rchan_oes_offset = 0x400,
3178         .tpl_levels = 3,
3179         .level_start_idx = {
3180                 [0] = 16, /* Normal channels */
3181                 [1] = 4, /* High Throughput channels */
3182                 [2] = 0, /* Ultra High Throughput channels */
3183         },
3184 };
3185
3186 static struct udma_match_data j721e_mcu_data = {
3187         .psil_base = 0x6000,
3188         .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
3189         .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3190         .statictr_z_mask = GENMASK(23, 0),
3191         .rchan_oes_offset = 0x400,
3192         .tpl_levels = 2,
3193         .level_start_idx = {
3194                 [0] = 2, /* Normal channels */
3195                 [1] = 0, /* High Throughput channels */
3196         },
3197 };
3198
3199 static const struct of_device_id udma_of_match[] = {
3200         {
3201                 .compatible = "ti,am654-navss-main-udmap",
3202                 .data = &am654_main_data,
3203         },
3204         {
3205                 .compatible = "ti,am654-navss-mcu-udmap",
3206                 .data = &am654_mcu_data,
3207         }, {
3208                 .compatible = "ti,j721e-navss-main-udmap",
3209                 .data = &j721e_main_data,
3210         }, {
3211                 .compatible = "ti,j721e-navss-mcu-udmap",
3212                 .data = &j721e_mcu_data,
3213         },
3214         { /* Sentinel */ },
3215 };
3216
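/* Map the "gcfg", "rchanrt" and "tchanrt" register regions by name */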
3217 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3218 {
3219         struct resource *res;
3220         int i;
3221
3222         for (i = 0; i < MMR_LAST; i++) {
3223                 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3224                                                    mmr_names[i]);
3225                 ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
3226                 if (IS_ERR(ud->mmrs[i]))
3227                         return PTR_ERR(ud->mmrs[i]);
3228         }
3229
3230         return 0;
3231 }
3232
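/*
 * Discover the channel and flow counts from the hardware, reserve the
 * tchan/rchan/rflow ranges assigned to this host by TISCI, allocate the
 * MSI interrupts for the channels and return the number of channels
 * usable by Linux.
 */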
3233 static int udma_setup_resources(struct udma_dev *ud)
3234 {
3235         struct device *dev = ud->dev;
3236         int ch_count, ret, i, j;
3237         u32 cap2, cap3;
3238         struct ti_sci_resource_desc *rm_desc;
3239         struct ti_sci_resource *rm_res, irq_res;
3240         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3241         static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3242                                                     "ti,sci-rm-range-rchan",
3243                                                     "ti,sci-rm-range-rflow" };
3244
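        /* Channel and flow counts are advertised by the capability registers */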
3245         cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
3246         cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
3247
3248         ud->rflow_cnt = cap3 & 0x3fff;
3249         ud->tchan_cnt = cap2 & 0x1ff;
3250         ud->echan_cnt = (cap2 >> 9) & 0x1ff;
3251         ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
3252         ch_count  = ud->tchan_cnt + ud->rchan_cnt;
3253
3254         ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3255                                            sizeof(unsigned long), GFP_KERNEL);
3256         ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3257                                   GFP_KERNEL);
3258         ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3259                                            sizeof(unsigned long), GFP_KERNEL);
3260         ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3261                                   GFP_KERNEL);
3262         ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3263                                               sizeof(unsigned long),
3264                                               GFP_KERNEL);
3265         ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3266                                                   BITS_TO_LONGS(ud->rflow_cnt),
3267                                                   sizeof(unsigned long),
3268                                                   GFP_KERNEL);
3269         ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3270                                         sizeof(unsigned long),
3271                                         GFP_KERNEL);
3272         ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3273                                   GFP_KERNEL);
3274
3275         if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3276             !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3277             !ud->rflows || !ud->rflow_in_use)
3278                 return -ENOMEM;
3279
3280         /*
3281          * RX flows with the same IDs as RX channels are reserved to be used
3282          * as default flows if remote HW can't generate flow_ids. Those
3283          * RX flows can be requested only explicitly by ID.
3284          */
3285         bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3286
3287         /* by default no GP rflows are assigned to Linux */
3288         bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3289
3290         /* Get resource ranges from tisci */
3291         for (i = 0; i < RM_RANGE_LAST; i++)
3292                 tisci_rm->rm_ranges[i] =
3293                         devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3294                                                     tisci_rm->tisci_dev_id,
3295                                                     (char *)range_names[i]);
3296
3297         /* tchan ranges */
3298         rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3299         if (IS_ERR(rm_res)) {
3300                 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3301         } else {
3302                 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3303                 for (i = 0; i < rm_res->sets; i++) {
3304                         rm_desc = &rm_res->desc[i];
3305                         bitmap_clear(ud->tchan_map, rm_desc->start,
3306                                      rm_desc->num);
3307                         dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3308                                 rm_desc->start, rm_desc->num);
3309                 }
3310         }
3311         irq_res.sets = rm_res->sets;
3312
3313         /* rchan and matching default flow ranges */
3314         rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3315         if (IS_ERR(rm_res)) {
3316                 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3317         } else {
3318                 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3319                 for (i = 0; i < rm_res->sets; i++) {
3320                         rm_desc = &rm_res->desc[i];
3321                         bitmap_clear(ud->rchan_map, rm_desc->start,
3322                                      rm_desc->num);
3323                         dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3324                                 rm_desc->start, rm_desc->num);
3325                 }
3326         }
3327
3328         irq_res.sets += rm_res->sets;
3329         irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3330         rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3331         for (i = 0; i < rm_res->sets; i++) {
3332                 irq_res.desc[i].start = rm_res->desc[i].start;
3333                 irq_res.desc[i].num = rm_res->desc[i].num;
3334         }
3335         rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3336         for (j = 0; j < rm_res->sets; j++, i++) {
3337                 irq_res.desc[i].start = rm_res->desc[j].start +
3338                                         ud->match_data->rchan_oes_offset;
3339                 irq_res.desc[i].num = rm_res->desc[j].num;
3340         }
3341         ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3342         kfree(irq_res.desc);
3343         if (ret) {
3344                 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3345                 return ret;
3346         }
3347
3348         /* GP rflow ranges */
3349         rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3350         if (IS_ERR(rm_res)) {
3351                 /* all gp flows are assigned exclusively to Linux */
3352                 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3353                              ud->rflow_cnt - ud->rchan_cnt);
3354         } else {
3355                 for (i = 0; i < rm_res->sets; i++) {
3356                         rm_desc = &rm_res->desc[i];
3357                         bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3358                                      rm_desc->num);
3359                         dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3360                                 rm_desc->start, rm_desc->num);
3361                 }
3362         }
3363
3364         ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3365         ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3366         if (!ch_count)
3367                 return -ENODEV;
3368
3369         ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3370                                     GFP_KERNEL);
3371         if (!ud->channels)
3372                 return -ENOMEM;
3373
3374         dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3375                  ch_count,
3376                  ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3377                  ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3378                  ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3379                                                ud->rflow_cnt));
3380
3381         return ch_count;
3382 }
3383
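/*
 * Set up the dummy descriptors (one TR mode, one packet mode) used to
 * flush out data left in flight when an RX channel is torn down; both
 * point to a 1K scratch buffer.
 */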
3384 static int udma_setup_rx_flush(struct udma_dev *ud)
3385 {
3386         struct udma_rx_flush *rx_flush = &ud->rx_flush;
3387         struct cppi5_desc_hdr_t *tr_desc;
3388         struct cppi5_tr_type1_t *tr_req;
3389         struct cppi5_host_desc_t *desc;
3390         struct device *dev = ud->dev;
3391         struct udma_hwdesc *hwdesc;
3392         size_t tr_size;
3393
3394         /* Allocate 1K buffer for discarded data on RX channel teardown */
3395         rx_flush->buffer_size = SZ_1K;
3396         rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
3397                                               GFP_KERNEL);
3398         if (!rx_flush->buffer_vaddr)
3399                 return -ENOMEM;
3400
3401         rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
3402                                                 rx_flush->buffer_size,
3403                                                 DMA_TO_DEVICE);
3404         if (dma_mapping_error(dev, rx_flush->buffer_paddr))
3405                 return -ENOMEM;
3406
3407         /* Set up descriptor to be used for TR mode */
3408         hwdesc = &rx_flush->hwdescs[0];
3409         tr_size = sizeof(struct cppi5_tr_type1_t);
3410         hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
3411         hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
3412                                         ud->desc_align);
3413
3414         hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3415                                                 GFP_KERNEL);
3416         if (!hwdesc->cppi5_desc_vaddr)
3417                 return -ENOMEM;
3418
3419         hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3420                                                   hwdesc->cppi5_desc_size,
3421                                                   DMA_TO_DEVICE);
3422         if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3423                 return -ENOMEM;
3424
3425         /* Start of the TR req records */
3426         hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
3427         /* Start address of the TR response array */
3428         hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
3429
3430         tr_desc = hwdesc->cppi5_desc_vaddr;
3431         cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
3432         cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3433         cppi5_desc_set_retpolicy(tr_desc, 0, 0);
3434
3435         tr_req = hwdesc->tr_req_base;
3436         cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
3437                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3438         cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
3439
3440         tr_req->addr = rx_flush->buffer_paddr;
3441         tr_req->icnt0 = rx_flush->buffer_size;
3442         tr_req->icnt1 = 1;
3443
3444         /* Set up descriptor to be used for packet mode */
3445         hwdesc = &rx_flush->hwdescs[1];
3446         hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3447                                         CPPI5_INFO0_HDESC_EPIB_SIZE +
3448                                         CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
3449                                         ud->desc_align);
3450
3451         hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3452                                                 GFP_KERNEL);
3453         if (!hwdesc->cppi5_desc_vaddr)
3454                 return -ENOMEM;
3455
3456         hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3457                                                   hwdesc->cppi5_desc_size,
3458                                                   DMA_TO_DEVICE);
3459         if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3460                 return -ENOMEM;
3461
3462         desc = hwdesc->cppi5_desc_vaddr;
3463         cppi5_hdesc_init(desc, 0, 0);
3464         cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3465         cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
3466
3467         cppi5_hdesc_attach_buf(desc,
3468                                rx_flush->buffer_paddr, rx_flush->buffer_size,
3469                                rx_flush->buffer_paddr, rx_flush->buffer_size);
3470
3471         dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3472                                    hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3473         return 0;
3474 }
3475
3476 #define TI_UDMAC_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3477                                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3478                                  BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3479                                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3480                                  BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
3481
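/*
 * Probe: map the MMRs, acquire the TISCI and ring accelerator handles,
 * set up the dmaengine capabilities from the match data, initialize the
 * channels and register both the DMA device and the OF DMA controller.
 */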
3482 static int udma_probe(struct platform_device *pdev)
3483 {
3484         struct device_node *navss_node = pdev->dev.parent->of_node;
3485         struct device *dev = &pdev->dev;
3486         struct udma_dev *ud;
3487         const struct of_device_id *match;
3488         int i, ret;
3489         int ch_count;
3490
3491         ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
3492         if (ret)
3493                 dev_err(dev, "failed to set DMA mask\n");
3494
3495         ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3496         if (!ud)
3497                 return -ENOMEM;
3498
3499         ret = udma_get_mmrs(pdev, ud);
3500         if (ret)
3501                 return ret;
3502
3503         ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3504         if (IS_ERR(ud->tisci_rm.tisci))
3505                 return PTR_ERR(ud->tisci_rm.tisci);
3506
3507         ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
3508                                    &ud->tisci_rm.tisci_dev_id);
3509         if (ret) {
3510                 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
3511                 return ret;
3512         }
3513         pdev->id = ud->tisci_rm.tisci_dev_id;
3514
3515         ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
3516                                    &ud->tisci_rm.tisci_navss_dev_id);
3517         if (ret) {
3518                 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
3519                 return ret;
3520         }
3521
3522         ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3523         ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3524
3525         ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3526         if (IS_ERR(ud->ringacc))
3527                 return PTR_ERR(ud->ringacc);
3528
3529         dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
3530                                             DOMAIN_BUS_TI_SCI_INTA_MSI);
3531         if (!dev->msi_domain) {
3532                 dev_err(dev, "Failed to get MSI domain\n");
3533                 return -EPROBE_DEFER;
3534         }
3535
3536         match = of_match_node(udma_of_match, dev->of_node);
3537         if (!match) {
3538                 dev_err(dev, "No compatible match found\n");
3539                 return -ENODEV;
3540         }
3541         ud->match_data = match->data;
3542
3543         dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
3544         dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
3545
3546         ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
3547         ud->ddev.device_config = udma_slave_config;
3548         ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
3549         ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
3550         ud->ddev.device_issue_pending = udma_issue_pending;
3551         ud->ddev.device_tx_status = udma_tx_status;
3552         ud->ddev.device_pause = udma_pause;
3553         ud->ddev.device_resume = udma_resume;
3554         ud->ddev.device_terminate_all = udma_terminate_all;
3555         ud->ddev.device_synchronize = udma_synchronize;
3556
3557         ud->ddev.device_free_chan_resources = udma_free_chan_resources;
3558         ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
3559         ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
3560         ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3561         ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3562         ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
3563         ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
3564                                        DESC_METADATA_ENGINE;
3565         if (ud->match_data->enable_memcpy_support) {
3566                 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
3567                 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
3568                 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
3569         }
3570
3571         ud->ddev.dev = dev;
3572         ud->dev = dev;
3573         ud->psil_base = ud->match_data->psil_base;
3574
3575         INIT_LIST_HEAD(&ud->ddev.channels);
3576         INIT_LIST_HEAD(&ud->desc_to_purge);
3577
3578         ch_count = udma_setup_resources(ud);
3579         if (ch_count <= 0)
3580                 return ch_count;
3581
3582         spin_lock_init(&ud->lock);
3583         INIT_WORK(&ud->purge_work, udma_purge_desc_work);
3584
3585         ud->desc_align = 64;
3586         if (ud->desc_align < dma_get_cache_alignment())
3587                 ud->desc_align = dma_get_cache_alignment();
3588
3589         ret = udma_setup_rx_flush(ud);
3590         if (ret)
3591                 return ret;
3592
3593         for (i = 0; i < ud->tchan_cnt; i++) {
3594                 struct udma_tchan *tchan = &ud->tchans[i];
3595
3596                 tchan->id = i;
3597                 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
3598         }
3599
3600         for (i = 0; i < ud->rchan_cnt; i++) {
3601                 struct udma_rchan *rchan = &ud->rchans[i];
3602
3603                 rchan->id = i;
3604                 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
3605         }
3606
3607         for (i = 0; i < ud->rflow_cnt; i++) {
3608                 struct udma_rflow *rflow = &ud->rflows[i];
3609
3610                 rflow->id = i;
3611         }
3612
3613         for (i = 0; i < ch_count; i++) {
3614                 struct udma_chan *uc = &ud->channels[i];
3615
3616                 uc->ud = ud;
3617                 uc->vc.desc_free = udma_desc_free;
3618                 uc->id = i;
3619                 uc->tchan = NULL;
3620                 uc->rchan = NULL;
3621                 uc->config.remote_thread_id = -1;
3622                 uc->config.dir = DMA_MEM_TO_MEM;
3623                 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
3624                                           dev_name(dev), i);
3625
3626                 vchan_init(&uc->vc, &ud->ddev);
3627                 /* Use custom vchan completion handling */
3628                 tasklet_init(&uc->vc.task, udma_vchan_complete,
3629                              (unsigned long)&uc->vc);
3630                 init_completion(&uc->teardown_completed);
3631         }
3632
3633         ret = dma_async_device_register(&ud->ddev);
3634         if (ret) {
3635                 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
3636                 return ret;
3637         }
3638
3639         platform_set_drvdata(pdev, ud);
3640
3641         ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
3642         if (ret) {
3643                 dev_err(dev, "failed to register of_dma controller\n");
3644                 dma_async_device_unregister(&ud->ddev);
3645         }
3646
3647         return ret;
3648 }
3649
3650 static struct platform_driver udma_driver = {
3651         .driver = {
3652                 .name   = "ti-udma",
3653                 .of_match_table = udma_of_match,
3654                 .suppress_bind_attrs = true,
3655         },
3656         .probe          = udma_probe,
3657 };
3658 builtin_platform_driver(udma_driver);
3659
3660 /* Private interfaces to UDMA */
3661 #include "k3-udma-private.c"