// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-udma.h"
#include "k3-psil-priv.h"

struct udma_static_tr {
        u8 elsize; /* RPSTR0 */
        u16 elcnt; /* RPSTR0 */
        u16 bstcnt; /* RPSTR1 */
};

#define K3_UDMA_MAX_RFLOWS              1024
#define K3_UDMA_DEFAULT_RING_SIZE       16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE          0
#define UDMA_RFLOW_SRCTAG_CFG_TAG       1
#define UDMA_RFLOW_SRCTAG_FLOW_ID       2
#define UDMA_RFLOW_SRCTAG_SRC_TAG       4

#define UDMA_RFLOW_DSTTAG_NONE          0
#define UDMA_RFLOW_DSTTAG_CFG_TAG       1
#define UDMA_RFLOW_DSTTAG_FLOW_ID       2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO    4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI    5

struct udma_chan;

enum k3_dma_type {
        DMA_TYPE_UDMA = 0,
        DMA_TYPE_BCDMA,
        DMA_TYPE_PKTDMA,
};

enum udma_mmr {
        MMR_GCFG = 0,
        MMR_BCHANRT,
        MMR_RCHANRT,
        MMR_TCHANRT,
        MMR_LAST,
};

static const char * const mmr_names[] = {
        [MMR_GCFG] = "gcfg",
        [MMR_BCHANRT] = "bchanrt",
        [MMR_RCHANRT] = "rchanrt",
        [MMR_TCHANRT] = "tchanrt",
};

struct udma_tchan {
        void __iomem *reg_rt;

        int id;
        struct k3_ring *t_ring; /* Transmit ring */
        struct k3_ring *tc_ring; /* Transmit Completion ring */
        int tflow_id; /* applicable only for PKTDMA */
};

#define udma_bchan udma_tchan

struct udma_rflow {
        int id;
        struct k3_ring *fd_ring; /* Free Descriptor ring */
        struct k3_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
        void __iomem *reg_rt;

        int id;
};

struct udma_oes_offsets {
        /* K3 UDMA Output Event Offset */
        u32 udma_rchan;

        /* BCDMA Output Event Offsets */
        u32 bcdma_bchan_data;
        u32 bcdma_bchan_ring;
        u32 bcdma_tchan_data;
        u32 bcdma_tchan_ring;
        u32 bcdma_rchan_data;
        u32 bcdma_rchan_ring;

        /* PKTDMA Output Event Offsets */
        u32 pktdma_tchan_flow;
        u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32            BIT(0)
#define UDMA_FLAG_PDMA_BURST            BIT(1)
#define UDMA_FLAG_TDTYPE                BIT(2)
#define UDMA_FLAG_BURST_SIZE            BIT(3)
#define UDMA_FLAGS_J7_CLASS             (UDMA_FLAG_PDMA_ACC32 | \
                                         UDMA_FLAG_PDMA_BURST | \
                                         UDMA_FLAG_TDTYPE | \
                                         UDMA_FLAG_BURST_SIZE)

struct udma_match_data {
        enum k3_dma_type type;
        u32 psil_base;
        bool enable_memcpy_support;
        u32 flags;
        u32 statictr_z_mask;
        u8 burst_size[3];
};

struct udma_soc_data {
        struct udma_oes_offsets oes;
        u32 bcdma_trigger_event_offset;
};

struct udma_hwdesc {
        size_t cppi5_desc_size;
        void *cppi5_desc_vaddr;
        dma_addr_t cppi5_desc_paddr;

        /* TR descriptor internal pointers */
        void *tr_req_base;
        struct cppi5_tr_resp_t *tr_resp_base;
};

struct udma_rx_flush {
        struct udma_hwdesc hwdescs[2];

        size_t buffer_size;
        void *buffer_vaddr;
        dma_addr_t buffer_paddr;
};

struct udma_tpl {
        u8 levels;
        u32 start_idx[3];
};

struct udma_dev {
        struct dma_device ddev;
        struct device *dev;
        void __iomem *mmrs[MMR_LAST];
        const struct udma_match_data *match_data;
        const struct udma_soc_data *soc_data;

        struct udma_tpl bchan_tpl;
        struct udma_tpl tchan_tpl;
        struct udma_tpl rchan_tpl;

        size_t desc_align; /* alignment to use for descriptors */

        struct udma_tisci_rm tisci_rm;

        struct k3_ringacc *ringacc;

        struct work_struct purge_work;
        struct list_head desc_to_purge;
        spinlock_t lock;

        struct udma_rx_flush rx_flush;

        int bchan_cnt;
        int tchan_cnt;
        int echan_cnt;
        int rchan_cnt;
        int rflow_cnt;
        int tflow_cnt;
        unsigned long *bchan_map;
        unsigned long *tchan_map;
        unsigned long *rchan_map;
        unsigned long *rflow_gp_map;
        unsigned long *rflow_gp_map_allocated;
        unsigned long *rflow_in_use;
        unsigned long *tflow_map;

        struct udma_bchan *bchans;
        struct udma_tchan *tchans;
        struct udma_rchan *rchans;
        struct udma_rflow *rflows;

        struct udma_chan *channels;
        u32 psil_base;
        u32 atype;
        u32 asel;
};

struct udma_desc {
        struct virt_dma_desc vd;

        bool terminated;

        enum dma_transfer_direction dir;

        struct udma_static_tr static_tr;
        u32 residue;

        unsigned int sglen;
        unsigned int desc_idx; /* Only used for cyclic in packet mode */
        unsigned int tr_idx;

        u32 metadata_size;
        void *metadata; /* pointer to provided metadata buffer (EPIB, PSdata) */

        unsigned int hwdesc_count;
        struct udma_hwdesc hwdesc[];
};

enum udma_chan_state {
        UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
        UDMA_CHAN_IS_ACTIVE, /* Normal operation */
        UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};

struct udma_tx_drain {
        struct delayed_work work;
        ktime_t tstamp;
        u32 residue;
};

struct udma_chan_config {
        bool pkt_mode; /* TR or packet */
        bool needs_epib; /* whether EPIB is needed for the communication */
        u32 psd_size; /* size of Protocol Specific Data */
        u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
        u32 hdesc_size; /* Size of a packet descriptor in packet mode */
        bool notdpkt; /* Suppress sending TDC packet */
        int remote_thread_id;
        u32 atype;
        u32 asel;
        u32 src_thread;
        u32 dst_thread;
        enum psil_endpoint_type ep_type;
        bool enable_acc32;
        bool enable_burst;
        enum udma_tp_level channel_tpl; /* Channel Throughput Level */

        u32 tr_trigger_type;

        /* PKTDMA mapped channel */
        int mapped_channel_id;
        /* PKTDMA default tflow or rflow for mapped channel */
        int default_flow_id;

        enum dma_transfer_direction dir;
};

struct udma_chan {
        struct virt_dma_chan vc;
        struct dma_slave_config cfg;
        struct udma_dev *ud;
        struct device *dma_dev;
        struct udma_desc *desc;
        struct udma_desc *terminated_desc;
        struct udma_static_tr static_tr;
        char *name;

        struct udma_bchan *bchan;
        struct udma_tchan *tchan;
        struct udma_rchan *rchan;
        struct udma_rflow *rflow;

        bool psil_paired;

        int irq_num_ring;
        int irq_num_udma;

        bool cyclic;
        bool paused;

        enum udma_chan_state state;
        struct completion teardown_completed;

        struct udma_tx_drain tx_drain;

        u32 bcnt; /* number of bytes completed since the start of the channel */

        /* Channel configuration parameters */
        struct udma_chan_config config;

        /* dmapool for packet mode descriptors */
        bool use_dma_pool;
        struct dma_pool *hdesc_pool;

        u32 id;
};

static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
        return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
        return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct udma_desc, vd.tx);
}

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
        return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
        writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
                                    u32 mask, u32 val)
{
        u32 tmp, orig;

        orig = readl(base + reg);
        tmp = orig & ~mask;
        tmp |= (val & mask);

        if (tmp != orig)
                writel(tmp, base + reg);
}
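/*
 * Illustrative sketch, not part of the driver: udma_update_bits() is a
 * read-modify-write helper that only touches the bits covered by @mask and
 * skips the register write when nothing would change. Pausing a channel via
 * its realtime CTL register could look like this (hypothetical helper):
 */
static inline void udma_example_set_pause(void __iomem *reg_rt)
{
        /* Set the PAUSE bit, leave every other bit untouched */
        udma_update_bits(reg_rt, UDMA_CHAN_RT_CTL_REG,
                         UDMA_CHAN_RT_CTL_PAUSE, UDMA_CHAN_RT_CTL_PAUSE);
}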
/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
{
        if (!uc->tchan)
                return 0;
        return udma_read(uc->tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
        if (!uc->tchan)
                return;
        udma_write(uc->tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
                                            u32 mask, u32 val)
{
        if (!uc->tchan)
                return;
        udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
{
        if (!uc->rchan)
                return 0;
        return udma_read(uc->rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
        if (!uc->rchan)
                return;
        udma_write(uc->rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
                                            u32 mask, u32 val)
{
        if (!uc->rchan)
                return;
        udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
}

static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

        dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
        return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
                                              tisci_rm->tisci_navss_dev_id,
                                              src_thread, dst_thread);
}

static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
                             u32 dst_thread)
{
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

        dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
        return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
                                                tisci_rm->tisci_navss_dev_id,
                                                src_thread, dst_thread);
}
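/*
 * Illustrative sketch, not part of the driver: PSI-L threads are paired by
 * TI-SCI firmware. The wrappers above OR K3_PSIL_DST_THREAD_ID_OFFSET into
 * the destination thread number internally, so callers pass the raw thread
 * IDs, typically like this:
 *
 *      ret = navss_psil_pair(ud, uc->config.src_thread,
 *                            uc->config.dst_thread);
 *      if (ret)
 *              return ret;
 *      uc->psil_paired = true;
 */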
static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
{
        struct device *chan_dev = &chan->dev->device;

        if (asel == 0) {
                /* No special handling for the channel */
                chan->dev->chan_dma_dev = false;

                chan_dev->dma_coherent = false;
                chan_dev->dma_parms = NULL;
        } else if (asel == 14 || asel == 15) {
                chan->dev->chan_dma_dev = true;

                chan_dev->dma_coherent = true;
                dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
                chan_dev->dma_parms = chan_dev->parent->dma_parms;
        } else {
                dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);

                chan_dev->dma_coherent = false;
                chan_dev->dma_parms = NULL;
        }
}

static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
{
        int i;

        for (i = 0; i < tpl_map->levels; i++) {
                if (chan_id >= tpl_map->start_idx[i])
                        return i;
        }

        return 0;
}
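/*
 * Example with illustrative values: with levels = 3 and
 * start_idx = { 12, 4, 0 }, channel ids 12 and up resolve to index 0 (normal
 * TPL), ids 4..11 to index 1 (high TPL) and ids 0..3 to index 2 (ultra-high
 * TPL); the highest-throughput channels occupy the lowest ids.
 */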
static void udma_reset_uchan(struct udma_chan *uc)
{
        memset(&uc->config, 0, sizeof(uc->config));
        uc->config.remote_thread_id = -1;
        uc->config.mapped_channel_id = -1;
        uc->config.default_flow_id = -1;
        uc->state = UDMA_CHAN_IS_IDLE;
}

static void udma_dump_chan_stdata(struct udma_chan *uc)
{
        struct device *dev = uc->ud->dev;
        u32 offset;
        int i;

        if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
                dev_dbg(dev, "TCHAN State data:\n");
                for (i = 0; i < 32; i++) {
                        offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
                        dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
                                udma_tchanrt_read(uc, offset));
                }
        }

        if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
                dev_dbg(dev, "RCHAN State data:\n");
                for (i = 0; i < 32; i++) {
                        offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
                        dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
                                udma_rchanrt_read(uc, offset));
                }
        }
}

static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
                                                    int idx)
{
        return d->hwdesc[idx].cppi5_desc_paddr;
}

static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
{
        return d->hwdesc[idx].cppi5_desc_vaddr;
}

static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
                                                   dma_addr_t paddr)
{
        struct udma_desc *d = uc->terminated_desc;

        if (d) {
                dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
                                                                   d->desc_idx);

                if (desc_paddr != paddr)
                        d = NULL;
        }

        if (!d) {
                d = uc->desc;
                if (d) {
                        dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
                                                                d->desc_idx);

                        if (desc_paddr != paddr)
                                d = NULL;
                }
        }

        return d;
}

static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
{
        if (uc->use_dma_pool) {
                int i;

                for (i = 0; i < d->hwdesc_count; i++) {
                        if (!d->hwdesc[i].cppi5_desc_vaddr)
                                continue;

                        dma_pool_free(uc->hdesc_pool,
                                      d->hwdesc[i].cppi5_desc_vaddr,
                                      d->hwdesc[i].cppi5_desc_paddr);

                        d->hwdesc[i].cppi5_desc_vaddr = NULL;
                }
        } else if (d->hwdesc[0].cppi5_desc_vaddr) {
                dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
                                  d->hwdesc[0].cppi5_desc_vaddr,
                                  d->hwdesc[0].cppi5_desc_paddr);

                d->hwdesc[0].cppi5_desc_vaddr = NULL;
        }
}

static void udma_purge_desc_work(struct work_struct *work)
{
        struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
        struct virt_dma_desc *vd, *_vd;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&ud->lock, flags);
        list_splice_tail_init(&ud->desc_to_purge, &head);
        spin_unlock_irqrestore(&ud->lock, flags);

        list_for_each_entry_safe(vd, _vd, &head, node) {
                struct udma_chan *uc = to_udma_chan(vd->tx.chan);
                struct udma_desc *d = to_udma_desc(&vd->tx);

                udma_free_hwdesc(uc, d);
                list_del(&vd->node);
                kfree(d);
        }

        /* If more to purge, schedule the work again */
        if (!list_empty(&ud->desc_to_purge))
                schedule_work(&ud->purge_work);
}

static void udma_desc_free(struct virt_dma_desc *vd)
{
        struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
        struct udma_chan *uc = to_udma_chan(vd->tx.chan);
        struct udma_desc *d = to_udma_desc(&vd->tx);
        unsigned long flags;

        if (uc->terminated_desc == d)
                uc->terminated_desc = NULL;

        if (uc->use_dma_pool) {
                udma_free_hwdesc(uc, d);
                kfree(d);
                return;
        }

        spin_lock_irqsave(&ud->lock, flags);
        list_add_tail(&vd->node, &ud->desc_to_purge);
        spin_unlock_irqrestore(&ud->lock, flags);

        schedule_work(&ud->purge_work);
}

static bool udma_is_chan_running(struct udma_chan *uc)
{
        u32 trt_ctl = 0;
        u32 rrt_ctl = 0;

        if (uc->tchan)
                trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
        if (uc->rchan)
                rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);

        if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
                return true;

        return false;
}

static bool udma_is_chan_paused(struct udma_chan *uc)
{
        u32 val, pause_mask;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
                pause_mask = UDMA_PEER_RT_EN_PAUSE;
                break;
        case DMA_MEM_TO_DEV:
                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
                pause_mask = UDMA_PEER_RT_EN_PAUSE;
                break;
        case DMA_MEM_TO_MEM:
                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
                pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
                break;
        default:
                return false;
        }

        if (val & pause_mask)
                return true;

        return false;
}

static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
{
        return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
}

static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
        struct udma_desc *d = uc->desc;
        struct k3_ring *ring = NULL;
        dma_addr_t paddr;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                ring = uc->rflow->fd_ring;
                break;
        case DMA_MEM_TO_DEV:
        case DMA_MEM_TO_MEM:
                ring = uc->tchan->t_ring;
                break;
        default:
                return -EINVAL;
        }

        /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
        if (idx == -1) {
                paddr = udma_get_rx_flush_hwdesc_paddr(uc);
        } else {
                paddr = udma_curr_cppi5_desc_paddr(d, idx);

                wmb(); /* Ensure that writes are not moved over this point */
        }

        return k3_ringacc_ring_push(ring, &paddr);
}
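/*
 * Illustrative sketch, not part of the driver: queueing every hardware
 * descriptor of the current udma_desc, as done for packet mode cyclic or RX
 * transfers. Passing idx == -1 pushes the dedicated RX flush descriptor
 * instead (DEV_TO_MEM teardown only):
 *
 *      for (i = 0; i < uc->desc->sglen; i++)
 *              udma_push_to_ring(uc, i);
 */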
static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
{
        if (uc->config.dir != DMA_DEV_TO_MEM)
                return false;

        if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
                return true;

        return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
        struct k3_ring *ring = NULL;
        int ret;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                ring = uc->rflow->r_ring;
                break;
        case DMA_MEM_TO_DEV:
        case DMA_MEM_TO_MEM:
                ring = uc->tchan->tc_ring;
                break;
        default:
                return -ENOENT;
        }

        ret = k3_ringacc_ring_pop(ring, addr);
        if (ret)
                return ret;

        rmb(); /* Ensure that reads are not moved before this point */

        /* Teardown completion */
        if (cppi5_desc_is_tdcm(*addr))
                return 0;

        /* Check for flush descriptor */
        if (udma_desc_is_rx_flush(uc, *addr))
                return -ENOENT;

        return 0;
}
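/*
 * Illustrative sketch, not part of the driver: draining the completion ring.
 * A popped teardown completion marker (TDCM) returns 0 but carries no
 * descriptor, so callers must check for it before dereferencing:
 *
 *      dma_addr_t paddr = 0;
 *
 *      while (!udma_pop_from_ring(uc, &paddr) && paddr) {
 *              if (cppi5_desc_is_tdcm(paddr))
 *                      break; // teardown complete, not a descriptor
 *              // look up and complete the descriptor at paddr
 *      }
 */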
static void udma_reset_rings(struct udma_chan *uc)
{
        struct k3_ring *ring1 = NULL;
        struct k3_ring *ring2 = NULL;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                if (uc->rchan) {
                        ring1 = uc->rflow->fd_ring;
                        ring2 = uc->rflow->r_ring;
                }
                break;
        case DMA_MEM_TO_DEV:
        case DMA_MEM_TO_MEM:
                if (uc->tchan) {
                        ring1 = uc->tchan->t_ring;
                        ring2 = uc->tchan->tc_ring;
                }
                break;
        default:
                break;
        }

        if (ring1)
                k3_ringacc_ring_reset_dma(ring1,
                                          k3_ringacc_ring_get_occ(ring1));
        if (ring2)
                k3_ringacc_ring_reset(ring2);

        /* make sure we are not leaking memory with a stalled descriptor */
        if (uc->terminated_desc) {
                udma_desc_free(&uc->terminated_desc->vd);
                uc->terminated_desc = NULL;
        }
}

static void udma_reset_counters(struct udma_chan *uc)
{
        u32 val;
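        /*
         * The RT counter registers decrement by the value written to them,
         * so writing back the value just read clears each counter (an
         * assumption documented here based on how this helper uses them).
         */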
        if (uc->tchan) {
                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

                if (!uc->bchan) {
                        val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
                        udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
                }
        }

        if (uc->rchan) {
                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
        }

        uc->bcnt = 0;
}

static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
                break;
        case DMA_MEM_TO_DEV:
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
                break;
        case DMA_MEM_TO_MEM:
                udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
                break;
        default:
                return -EINVAL;
        }

        /* Reset all counters */
        udma_reset_counters(uc);

        /* Hard reset: re-initialize the channel to fully reset it */
        if (hard) {
                struct udma_chan_config ucc_backup;
                int ret;

                memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
                uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

                /* restore the channel configuration */
                memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
                ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
                if (ret)
                        return ret;

                /*
                 * Setting forced teardown after forced reset helps to recover
                 * the rchan.
                 */
                if (uc->config.dir == DMA_DEV_TO_MEM)
                        udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                           UDMA_CHAN_RT_CTL_EN |
                                           UDMA_CHAN_RT_CTL_TDOWN |
                                           UDMA_CHAN_RT_CTL_FTDOWN);
        }
        uc->state = UDMA_CHAN_IS_IDLE;

        return 0;
}

static void udma_start_desc(struct udma_chan *uc)
{
        struct udma_chan_config *ucc = &uc->config;

        if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
            (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
                int i;

                /*
                 * UDMA only: push all descriptors to the ring for packet mode
                 * cyclic or RX transfers. PKTDMA uses pre-linked descriptors
                 * instead and does not support cyclic mode.
                 */
                for (i = 0; i < uc->desc->sglen; i++)
                        udma_push_to_ring(uc, i);
        } else {
                udma_push_to_ring(uc, 0);
        }
}

static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
{
        /* Only PDMAs have staticTR */
        if (uc->config.ep_type == PSIL_EP_NATIVE)
                return false;

        /* Check if the staticTR configuration has changed for TX */
        if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
                return true;

        return false;
}

static int udma_start(struct udma_chan *uc)
{
        struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

        if (!vd) {
                uc->desc = NULL;
                return -ENOENT;
        }

        list_del(&vd->node);

        uc->desc = to_udma_desc(&vd->tx);

        /* Channel is already running and does not need reconfiguration */
        if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
                udma_start_desc(uc);
                goto out;
        }

        /* Make sure that we clear the teardown bit, if it is set */
        udma_reset_chan(uc, false);

        /* Push descriptors before we start the channel */
        udma_start_desc(uc);

        switch (uc->desc->dir) {
        case DMA_DEV_TO_MEM:
                /* Config remote TR */
                if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
                        u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
                                  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
                        const struct udma_match_data *match_data =
                                                        uc->ud->match_data;

                        if (uc->config.enable_acc32)
                                val |= PDMA_STATIC_TR_XY_ACC32;
                        if (uc->config.enable_burst)
                                val |= PDMA_STATIC_TR_XY_BURST;

                        udma_rchanrt_write(uc,
                                           UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
                                           val);

                        udma_rchanrt_write(uc,
                                UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
                                PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
                                                 match_data->statictr_z_mask));

                        /* save the current staticTR configuration */
                        memcpy(&uc->static_tr, &uc->desc->static_tr,
                               sizeof(uc->static_tr));
                }

                udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);

                /* Enable remote */
                udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE);

                break;
        case DMA_MEM_TO_DEV:
                /* Config remote TR */
                if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
                        u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
                                  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

                        if (uc->config.enable_acc32)
                                val |= PDMA_STATIC_TR_XY_ACC32;
                        if (uc->config.enable_burst)
                                val |= PDMA_STATIC_TR_XY_BURST;

                        udma_tchanrt_write(uc,
                                           UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
                                           val);

                        /* save the current staticTR configuration */
                        memcpy(&uc->static_tr, &uc->desc->static_tr,
                               sizeof(uc->static_tr));
                }

                /* Enable remote */
                udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE);

                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);

                break;
        case DMA_MEM_TO_MEM:
                udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);

                break;
        default:
                return -EINVAL;
        }

        uc->state = UDMA_CHAN_IS_ACTIVE;
out:

        return 0;
}

static int udma_stop(struct udma_chan *uc)
{
        enum udma_chan_state old_state = uc->state;

        uc->state = UDMA_CHAN_IS_TERMINATING;
        reinit_completion(&uc->teardown_completed);

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                if (!uc->cyclic && !uc->desc)
                        udma_push_to_ring(uc, -1);

                udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE |
                                   UDMA_PEER_RT_EN_TEARDOWN);
                break;
        case DMA_MEM_TO_DEV:
                udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE |
                                   UDMA_PEER_RT_EN_FLUSH);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN |
                                   UDMA_CHAN_RT_CTL_TDOWN);
                break;
        case DMA_MEM_TO_MEM:
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN |
                                   UDMA_CHAN_RT_CTL_TDOWN);
                break;
        default:
                uc->state = old_state;
                complete_all(&uc->teardown_completed);
                return -EINVAL;
        }

        return 0;
}
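/*
 * Illustrative sketch, not part of the driver: a terminate path can arm the
 * teardown with udma_stop() and then block on the completion that the
 * teardown completion message (TDCM) signals; the timeout value here is an
 * assumption:
 *
 *      if (!udma_stop(uc))
 *              wait_for_completion_timeout(&uc->teardown_completed,
 *                                          msecs_to_jiffies(100));
 */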
static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
{
        struct udma_desc *d = uc->desc;
        struct cppi5_host_desc_t *h_desc;

        h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
        cppi5_hdesc_reset_to_original(h_desc);
        udma_push_to_ring(uc, d->desc_idx);
        d->desc_idx = (d->desc_idx + 1) % d->sglen;
}

static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
{
        struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;

        memcpy(d->metadata, h_desc->epib, d->metadata_size);
}

static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
        u32 peer_bcnt, bcnt;

        /* Only TX towards PDMA is affected */
        if (uc->config.ep_type == PSIL_EP_NATIVE ||
            uc->config.dir != DMA_MEM_TO_DEV)
                return true;

        peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
        bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

        /* Transfer is incomplete, store current residue and time stamp */
        if (peer_bcnt < bcnt) {
                uc->tx_drain.residue = bcnt - peer_bcnt;
                uc->tx_drain.tstamp = ktime_get();
                return false;
        }

        return true;
}

static void udma_check_tx_completion(struct work_struct *work)
{
        struct udma_chan *uc = container_of(work, typeof(*uc),
                                            tx_drain.work.work);
        bool desc_done = true;
        u32 residue_diff;
        ktime_t time_diff;
        unsigned long delay;

        while (1) {
                if (uc->desc) {
                        /* Get previous residue and time stamp */
                        residue_diff = uc->tx_drain.residue;
                        time_diff = uc->tx_drain.tstamp;
                        /*
                         * Get current residue and time stamp or see if
                         * transfer is complete
                         */
                        desc_done = udma_is_desc_really_done(uc, uc->desc);
                }

                if (!desc_done) {
                        /*
                         * Find the time delta and residue delta w.r.t. the
                         * previous poll
                         */
                        time_diff = ktime_sub(uc->tx_drain.tstamp,
                                              time_diff) + 1;
                        residue_diff -= uc->tx_drain.residue;
                        if (residue_diff) {
                                /*
                                 * Try to guess when we should check
                                 * next time by calculating the rate at
                                 * which data is being drained by the
                                 * peer device
                                 */
                                delay = (time_diff / residue_diff) *
                                        uc->tx_drain.residue;
                        } else {
                                /* No progress, check again in 1 second */
                                schedule_delayed_work(&uc->tx_drain.work, HZ);
                                break;
                        }

                        usleep_range(ktime_to_us(delay),
                                     ktime_to_us(delay) + 10);
                        continue;
                }

                if (uc->desc) {
                        struct udma_desc *d = uc->desc;

                        uc->bcnt += d->residue;
                        udma_start(uc);
                        vchan_cookie_complete(&d->vd);
                        break;
                }

                break;
        }
}
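/*
 * Worked example for the drain-rate estimate above, with illustrative
 * numbers: if 4000 bytes were drained in the 2 ms since the previous poll
 * and 1000 bytes of residue remain, the next check is scheduled in roughly
 * (2 ms / 4000) * 1000 = 500 us.
 */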
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
        struct udma_chan *uc = data;
        struct udma_desc *d;
        dma_addr_t paddr = 0;

        if (udma_pop_from_ring(uc, &paddr) || !paddr)
                return IRQ_HANDLED;

        spin_lock(&uc->vc.lock);

        /* Teardown completion message */
        if (cppi5_desc_is_tdcm(paddr)) {
                complete_all(&uc->teardown_completed);

                if (uc->terminated_desc) {
                        udma_desc_free(&uc->terminated_desc->vd);
                        uc->terminated_desc = NULL;
                }

                if (!uc->desc)
                        udma_start(uc);

                goto out;
        }

        d = udma_udma_desc_from_paddr(uc, paddr);

        if (d) {
                dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
                                                                   d->desc_idx);
                if (desc_paddr != paddr) {
                        dev_err(uc->ud->dev, "not matching descriptors!\n");
                        goto out;
                }

                if (d == uc->desc) {
                        /* active descriptor */
                        if (uc->cyclic) {
                                udma_cyclic_packet_elapsed(uc);
                                vchan_cyclic_callback(&d->vd);
                        } else {
                                if (udma_is_desc_really_done(uc, d)) {
                                        uc->bcnt += d->residue;
                                        udma_start(uc);
                                        vchan_cookie_complete(&d->vd);
                                } else {
                                        schedule_delayed_work(&uc->tx_drain.work,
                                                              0);
                                }
                        }
                } else {
                        /*
                         * terminated descriptor, mark the descriptor as
                         * completed to update the channel's cookie marker
                         */
                        dma_cookie_complete(&d->vd.tx);
                }
        }
out:
        spin_unlock(&uc->vc.lock);

        return IRQ_HANDLED;
}

static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
        struct udma_chan *uc = data;
        struct udma_desc *d;

        spin_lock(&uc->vc.lock);
        d = uc->desc;
        if (d) {
                d->tr_idx = (d->tr_idx + 1) % d->sglen;

                if (uc->cyclic) {
                        vchan_cyclic_callback(&d->vd);
                } else {
                        /* TODO: figure out the real amount of data */
                        uc->bcnt += d->residue;
                        udma_start(uc);
                        vchan_cookie_complete(&d->vd);
                }
        }

        spin_unlock(&uc->vc.lock);

        return IRQ_HANDLED;
}

/**
 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
 * @ud: UDMA device
 * @from: Start the search from this flow id number
 * @cnt: Number of consecutive flow ids to allocate
 *
 * Allocate a range of RX flow ids for future use. These flows can be requested
 * only by explicit flow id number. If @from is set to -1 it will try to find
 * the first free range. If @from is a positive value it will force allocation
 * only of the specified range of flows.
 *
 * Returns -ENOMEM if a free range can not be found.
 * -EEXIST if the requested range is busy.
 * -EINVAL if wrong input values are passed.
 * Returns the flow id on success.
 */
static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
        int start, tmp_from;
        DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

        tmp_from = from;
        if (tmp_from < 0)
                tmp_from = ud->rchan_cnt;
        /* default flows can't be allocated; they are accessible only by id */
        if (tmp_from < ud->rchan_cnt)
                return -EINVAL;

        if (tmp_from + cnt > ud->rflow_cnt)
                return -EINVAL;

        bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
                  ud->rflow_cnt);

        start = bitmap_find_next_zero_area(tmp,
                                           ud->rflow_cnt,
                                           tmp_from, cnt, 0);
        if (start >= ud->rflow_cnt)
                return -ENOMEM;

        if (from >= 0 && start != from)
                return -EEXIST;

        bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
        return start;
}
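/*
 * Illustrative sketch, not part of the driver: reserving a range of GP RX
 * flows. With @from == -1 the allocator picks the first free range above the
 * default (per-rchan) flows and returns its starting flow id:
 *
 *      int start = __udma_alloc_gp_rflow_range(ud, -1, 4);
 *
 *      if (start < 0)
 *              return start; // -ENOMEM, -EEXIST or -EINVAL
 *      ...
 *      __udma_free_gp_rflow_range(ud, start, 4);
 */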
static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
        if (from < ud->rchan_cnt)
                return -EINVAL;
        if (from + cnt > ud->rflow_cnt)
                return -EINVAL;

        bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
        return 0;
}

static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
{
        /*
         * An attempt to request an rflow by ID can be made for any rflow that
         * is not in use, with the assumption that the caller knows what it is
         * doing. TI-SCI FW will perform an additional permission check anyway,
         * so it's safe.
         */

        if (id < 0 || id >= ud->rflow_cnt)
                return ERR_PTR(-ENOENT);

        if (test_bit(id, ud->rflow_in_use))
                return ERR_PTR(-ENOENT);

        if (ud->rflow_gp_map) {
                /* GP rflow has to be allocated first */
                if (!test_bit(id, ud->rflow_gp_map) &&
                    !test_bit(id, ud->rflow_gp_map_allocated))
                        return ERR_PTR(-EINVAL);
        }

        dev_dbg(ud->dev, "get rflow%d\n", id);
        set_bit(id, ud->rflow_in_use);
        return &ud->rflows[id];
}

static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
{
        if (!test_bit(rflow->id, ud->rflow_in_use)) {
                dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
                return;
        }

        dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
        clear_bit(rflow->id, ud->rflow_in_use);
}

#define UDMA_RESERVE_RESOURCE(res)                                      \
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,     \
                                               enum udma_tp_level tpl,  \
                                               int id)                  \
{                                                                       \
        if (id >= 0) {                                                  \
                if (test_bit(id, ud->res##_map)) {                      \
                        dev_err(ud->dev, "%s%d is in use\n", #res, id); \
                        return ERR_PTR(-ENOENT);                        \
                }                                                       \
        } else {                                                        \
                int start;                                              \
                                                                        \
                if (tpl >= ud->res##_tpl.levels)                        \
                        tpl = ud->res##_tpl.levels - 1;                 \
                                                                        \
                start = ud->res##_tpl.start_idx[tpl];                   \
                                                                        \
                id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,   \
                                        start);                         \
                if (id == ud->res##_cnt) {                              \
                        return ERR_PTR(-ENOENT);                        \
                }                                                       \
        }                                                               \
                                                                        \
        set_bit(id, ud->res##_map);                                     \
        return &ud->res##s[id];                                         \
}

UDMA_RESERVE_RESOURCE(bchan);
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
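/*
 * For reference: UDMA_RESERVE_RESOURCE(tchan) expands to a
 * __udma_reserve_tchan() helper operating on ud->tchan_map, ud->tchan_tpl
 * and ud->tchans[]. A caller reserving any free channel at a given
 * throughput level passes id == -1 (sketch of typical use):
 *
 *      uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
 *      if (IS_ERR(uc->tchan))
 *              return PTR_ERR(uc->tchan);
 */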
1347 static int bcdma_get_bchan(struct udma_chan *uc)
1348 {
1349         struct udma_dev *ud = uc->ud;
1350         enum udma_tp_level tpl;
1351
1352         if (uc->bchan) {
1353                 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
1354                         uc->id, uc->bchan->id);
1355                 return 0;
1356         }
1357
1358         /*
1359          * Use normal channels for peripherals, and highest TPL channel for
1360          * mem2mem
1361          */
1362         if (uc->config.tr_trigger_type)
1363                 tpl = 0;
1364         else
1365                 tpl = ud->bchan_tpl.levels - 1;
1366
1367         uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
1368         if (IS_ERR(uc->bchan))
1369                 return PTR_ERR(uc->bchan);
1370
1371         uc->tchan = uc->bchan;
1372
1373         return 0;
1374 }
1375
1376 static int udma_get_tchan(struct udma_chan *uc)
1377 {
1378         struct udma_dev *ud = uc->ud;
1379
1380         if (uc->tchan) {
1381                 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1382                         uc->id, uc->tchan->id);
1383                 return 0;
1384         }
1385
1386         /*
1387          * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1388          * For PKTDMA mapped channels it is configured to a channel which must
1389          * be used to service the peripheral.
1390          */
1391         uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1392                                          uc->config.mapped_channel_id);
1393         if (IS_ERR(uc->tchan))
1394                 return PTR_ERR(uc->tchan);
1395
1396         if (ud->tflow_cnt) {
1397                 int tflow_id;
1398
1399                 /* Only PKTDMA have support for tx flows */
1400                 if (uc->config.default_flow_id >= 0)
1401                         tflow_id = uc->config.default_flow_id;
1402                 else
1403                         tflow_id = uc->tchan->id;
1404
1405                 if (test_bit(tflow_id, ud->tflow_map)) {
1406                         dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1407                         clear_bit(uc->tchan->id, ud->tchan_map);
1408                         uc->tchan = NULL;
1409                         return -ENOENT;
1410                 }
1411
1412                 uc->tchan->tflow_id = tflow_id;
1413                 set_bit(tflow_id, ud->tflow_map);
1414         } else {
1415                 uc->tchan->tflow_id = -1;
1416         }
1417
1418         return 0;
1419 }
1420
1421 static int udma_get_rchan(struct udma_chan *uc)
1422 {
1423         struct udma_dev *ud = uc->ud;
1424
1425         if (uc->rchan) {
1426                 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1427                         uc->id, uc->rchan->id);
1428                 return 0;
1429         }
1430
1431         /*
1432          * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1433          * For PKTDMA mapped channels it is configured to a channel which must
1434          * be used to service the peripheral.
1435          */
1436         uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1437                                          uc->config.mapped_channel_id);
1438
1439         return PTR_ERR_OR_ZERO(uc->rchan);
1440 }
1441
1442 static int udma_get_chan_pair(struct udma_chan *uc)
1443 {
1444         struct udma_dev *ud = uc->ud;
1445         int chan_id, end;
1446
1447         if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1448                 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1449                          uc->id, uc->tchan->id);
1450                 return 0;
1451         }
1452
1453         if (uc->tchan) {
1454                 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1455                         uc->id, uc->tchan->id);
1456                 return -EBUSY;
1457         } else if (uc->rchan) {
1458                 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1459                         uc->id, uc->rchan->id);
1460                 return -EBUSY;
1461         }
1462
1463         /* Can be optimized, but let's have it like this for now */
1464         end = min(ud->tchan_cnt, ud->rchan_cnt);
1465         /*
1466          * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1467          * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1468          */
1469         chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1470         for (; chan_id < end; chan_id++) {
1471                 if (!test_bit(chan_id, ud->tchan_map) &&
1472                     !test_bit(chan_id, ud->rchan_map))
1473                         break;
1474         }
1475
1476         if (chan_id == end)
1477                 return -ENOENT;
1478
1479         set_bit(chan_id, ud->tchan_map);
1480         set_bit(chan_id, ud->rchan_map);
1481         uc->tchan = &ud->tchans[chan_id];
1482         uc->rchan = &ud->rchans[chan_id];
1483
1484         /* UDMA does not use tx flows */
1485         uc->tchan->tflow_id = -1;
1486
1487         return 0;
1488 }
1489
1490 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1491 {
1492         struct udma_dev *ud = uc->ud;
1493
1494         if (!uc->rchan) {
1495                 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1496                 return -EINVAL;
1497         }
1498
1499         if (uc->rflow) {
1500                 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1501                         uc->id, uc->rflow->id);
1502                 return 0;
1503         }
1504
1505         uc->rflow = __udma_get_rflow(ud, flow_id);
1506
1507         return PTR_ERR_OR_ZERO(uc->rflow);
1508 }
1509
1510 static void bcdma_put_bchan(struct udma_chan *uc)
1511 {
1512         struct udma_dev *ud = uc->ud;
1513
1514         if (uc->bchan) {
1515                 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1516                         uc->bchan->id);
1517                 clear_bit(uc->bchan->id, ud->bchan_map);
1518                 uc->bchan = NULL;
1519                 uc->tchan = NULL;
1520         }
1521 }
1522
1523 static void udma_put_rchan(struct udma_chan *uc)
1524 {
1525         struct udma_dev *ud = uc->ud;
1526
1527         if (uc->rchan) {
1528                 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1529                         uc->rchan->id);
1530                 clear_bit(uc->rchan->id, ud->rchan_map);
1531                 uc->rchan = NULL;
1532         }
1533 }
1534
1535 static void udma_put_tchan(struct udma_chan *uc)
1536 {
1537         struct udma_dev *ud = uc->ud;
1538
1539         if (uc->tchan) {
1540                 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1541                         uc->tchan->id);
1542                 clear_bit(uc->tchan->id, ud->tchan_map);
1543
1544                 if (uc->tchan->tflow_id >= 0)
1545                         clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1546
1547                 uc->tchan = NULL;
1548         }
1549 }
1550
1551 static void udma_put_rflow(struct udma_chan *uc)
1552 {
1553         struct udma_dev *ud = uc->ud;
1554
1555         if (uc->rflow) {
1556                 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1557                         uc->rflow->id);
1558                 __udma_put_rflow(ud, uc->rflow);
1559                 uc->rflow = NULL;
1560         }
1561 }
1562
1563 static void bcdma_free_bchan_resources(struct udma_chan *uc)
1564 {
1565         if (!uc->bchan)
1566                 return;
1567
1568         k3_ringacc_ring_free(uc->bchan->tc_ring);
1569         k3_ringacc_ring_free(uc->bchan->t_ring);
1570         uc->bchan->tc_ring = NULL;
1571         uc->bchan->t_ring = NULL;
1572         k3_configure_chan_coherency(&uc->vc.chan, 0);
1573
1574         bcdma_put_bchan(uc);
1575 }
1576
1577 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1578 {
1579         struct k3_ring_cfg ring_cfg;
1580         struct udma_dev *ud = uc->ud;
1581         int ret;
1582
1583         ret = bcdma_get_bchan(uc);
1584         if (ret)
1585                 return ret;
1586
1587         ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1588                                             &uc->bchan->t_ring,
1589                                             &uc->bchan->tc_ring);
1590         if (ret) {
1591                 ret = -EBUSY;
1592                 goto err_ring;
1593         }
1594
1595         memset(&ring_cfg, 0, sizeof(ring_cfg));
1596         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1597         ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1598         ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1599
1600         k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1601         ring_cfg.asel = ud->asel;
1602         ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1603
1604         ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1605         if (ret)
1606                 goto err_ringcfg;
1607
1608         return 0;
1609
1610 err_ringcfg:
1611         k3_ringacc_ring_free(uc->bchan->tc_ring);
1612         uc->bchan->tc_ring = NULL;
1613         k3_ringacc_ring_free(uc->bchan->t_ring);
1614         uc->bchan->t_ring = NULL;
1615         k3_configure_chan_coherency(&uc->vc.chan, 0);
1616 err_ring:
1617         bcdma_put_bchan(uc);
1618
1619         return ret;
1620 }
1621
1622 static void udma_free_tx_resources(struct udma_chan *uc)
1623 {
1624         if (!uc->tchan)
1625                 return;
1626
1627         k3_ringacc_ring_free(uc->tchan->t_ring);
1628         k3_ringacc_ring_free(uc->tchan->tc_ring);
1629         uc->tchan->t_ring = NULL;
1630         uc->tchan->tc_ring = NULL;
1631
1632         udma_put_tchan(uc);
1633 }
1634
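/*
 * Reserve a tchan and its TX/TX-completion ring pair. The exclusive ring
 * index is the tflow id when one is assigned (PKTDMA); otherwise it is
 * the tchan id offset by the number of bchans preceding the tchans in
 * the global ring numbering.
 */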
1635 static int udma_alloc_tx_resources(struct udma_chan *uc)
1636 {
1637         struct k3_ring_cfg ring_cfg;
1638         struct udma_dev *ud = uc->ud;
1639         struct udma_tchan *tchan;
1640         int ring_idx, ret;
1641
1642         ret = udma_get_tchan(uc);
1643         if (ret)
1644                 return ret;
1645
1646         tchan = uc->tchan;
1647         if (tchan->tflow_id >= 0)
1648                 ring_idx = tchan->tflow_id;
1649         else
1650                 ring_idx = ud->bchan_cnt + tchan->id;
1651
1652         ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1653                                             &tchan->t_ring,
1654                                             &tchan->tc_ring);
1655         if (ret) {
1656                 ret = -EBUSY;
1657                 goto err_ring;
1658         }
1659
1660         memset(&ring_cfg, 0, sizeof(ring_cfg));
1661         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1662         ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1663         if (ud->match_data->type == DMA_TYPE_UDMA) {
1664                 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1665         } else {
1666                 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1667
1668                 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1669                 ring_cfg.asel = uc->config.asel;
1670                 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1671         }
1672
1673         ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
1674         ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1675
1676         if (ret)
1677                 goto err_ringcfg;
1678
1679         return 0;
1680
1681 err_ringcfg:
1682         k3_ringacc_ring_free(uc->tchan->tc_ring);
1683         uc->tchan->tc_ring = NULL;
1684         k3_ringacc_ring_free(uc->tchan->t_ring);
1685         uc->tchan->t_ring = NULL;
1686 err_ring:
1687         udma_put_tchan(uc);
1688
1689         return ret;
1690 }
1691
1692 static void udma_free_rx_resources(struct udma_chan *uc)
1693 {
1694         if (!uc->rchan)
1695                 return;
1696
1697         if (uc->rflow) {
1698                 struct udma_rflow *rflow = uc->rflow;
1699
1700                 k3_ringacc_ring_free(rflow->fd_ring);
1701                 k3_ringacc_ring_free(rflow->r_ring);
1702                 rflow->fd_ring = NULL;
1703                 rflow->r_ring = NULL;
1704
1705                 udma_put_rflow(uc);
1706         }
1707
1708         udma_put_rchan(uc);
1709 }
1710
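/*
 * Reserve an rchan and, unless the channel is MEM_TO_MEM, an rflow with
 * its free-descriptor/receive ring pair. The fd ring index follows the
 * same global numbering as the TX side: tflow based when tflows exist
 * (PKTDMA), otherwise bchans + tchans + echans + rchan id.
 */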
1711 static int udma_alloc_rx_resources(struct udma_chan *uc)
1712 {
1713         struct udma_dev *ud = uc->ud;
1714         struct k3_ring_cfg ring_cfg;
1715         struct udma_rflow *rflow;
1716         int fd_ring_id;
1717         int ret;
1718
1719         ret = udma_get_rchan(uc);
1720         if (ret)
1721                 return ret;
1722
1723         /* For MEM_TO_MEM we don't need rflow or rings */
1724         if (uc->config.dir == DMA_MEM_TO_MEM)
1725                 return 0;
1726
1727         if (uc->config.default_flow_id >= 0)
1728                 ret = udma_get_rflow(uc, uc->config.default_flow_id);
1729         else
1730                 ret = udma_get_rflow(uc, uc->rchan->id);
1731
1732         if (ret) {
1733                 ret = -EBUSY;
1734                 goto err_rflow;
1735         }
1736
1737         rflow = uc->rflow;
1738         if (ud->tflow_cnt)
1739                 fd_ring_id = ud->tflow_cnt + rflow->id;
1740         else
1741                 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1742                              uc->rchan->id;
1743
1744         ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1745                                             &rflow->fd_ring, &rflow->r_ring);
1746         if (ret) {
1747                 ret = -EBUSY;
1748                 goto err_ring;
1749         }
1750
1751         memset(&ring_cfg, 0, sizeof(ring_cfg));
1752
1753         ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1754         if (ud->match_data->type == DMA_TYPE_UDMA) {
1755                 if (uc->config.pkt_mode)
1756                         ring_cfg.size = SG_MAX_SEGMENTS;
1757                 else
1758                         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1759
1760                 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1761         } else {
1762                 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1763                 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1764
1765                 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1766                 ring_cfg.asel = uc->config.asel;
1767                 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1768         }
1769
1770         ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1771
1772         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1773         ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1774
1775         if (ret)
1776                 goto err_ringcfg;
1777
1778         return 0;
1779
1780 err_ringcfg:
1781         k3_ringacc_ring_free(rflow->r_ring);
1782         rflow->r_ring = NULL;
1783         k3_ringacc_ring_free(rflow->fd_ring);
1784         rflow->fd_ring = NULL;
1785 err_ring:
1786         udma_put_rflow(uc);
1787 err_rflow:
1788         udma_put_rchan(uc);
1789
1790         return ret;
1791 }
1792
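/*
 * Masks of the fields that carry valid data in the TISCI resource
 * management requests built below; System Firmware only acts on fields
 * whose valid bit is set.
 */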
1793 #define TISCI_BCDMA_BCHAN_VALID_PARAMS (                        \
1794         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1795         TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1796
1797 #define TISCI_BCDMA_TCHAN_VALID_PARAMS (                        \
1798         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1799         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1800
1801 #define TISCI_BCDMA_RCHAN_VALID_PARAMS (                        \
1802         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1803
1804 #define TISCI_UDMA_TCHAN_VALID_PARAMS (                         \
1805         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1806         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |      \
1807         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |    \
1808         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
1809         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |      \
1810         TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
1811         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
1812         TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1813
1814 #define TISCI_UDMA_RCHAN_VALID_PARAMS (                         \
1815         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1816         TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
1817         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
1818         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
1819         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |    \
1820         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |     \
1821         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |    \
1822         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |      \
1823         TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1824
1825 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1826 {
1827         struct udma_dev *ud = uc->ud;
1828         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1829         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1830         struct udma_tchan *tchan = uc->tchan;
1831         struct udma_rchan *rchan = uc->rchan;
1832         u8 burst_size = 0;
1833         int ret;
1834         u8 tpl;
1835
1836         /* Non-synchronized - mem to mem type of transfer */
1837         int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1838         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1839         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1840
1841         if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1842                 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1843
1844                 burst_size = ud->match_data->burst_size[tpl];
1845         }
1846
1847         req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1848         req_tx.nav_id = tisci_rm->tisci_dev_id;
1849         req_tx.index = tchan->id;
1850         req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1851         req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1852         req_tx.txcq_qnum = tc_ring;
1853         req_tx.tx_atype = ud->atype;
1854         if (burst_size) {
1855                 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1856                 req_tx.tx_burst_size = burst_size;
1857         }
1858
1859         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1860         if (ret) {
1861                 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1862                 return ret;
1863         }
1864
1865         req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1866         req_rx.nav_id = tisci_rm->tisci_dev_id;
1867         req_rx.index = rchan->id;
1868         req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1869         req_rx.rxcq_qnum = tc_ring;
1870         req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1871         req_rx.rx_atype = ud->atype;
1872         if (burst_size) {
1873                 req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1874                 req_rx.rx_burst_size = burst_size;
1875         }
1876
1877         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1878         if (ret)
1879                 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1880
1881         return ret;
1882 }
1883
1884 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1885 {
1886         struct udma_dev *ud = uc->ud;
1887         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1888         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1889         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1890         struct udma_bchan *bchan = uc->bchan;
1891         u8 burst_size = 0;
1892         int ret;
1893         u8 tpl;
1894
1895         if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1896                 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1897
1898                 burst_size = ud->match_data->burst_size[tpl];
1899         }
1900
1901         req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1902         req_tx.nav_id = tisci_rm->tisci_dev_id;
1903         req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1904         req_tx.index = bchan->id;
1905         if (burst_size) {
1906                 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1907                 req_tx.tx_burst_size = burst_size;
1908         }
1909
1910         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1911         if (ret)
1912                 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1913
1914         return ret;
1915 }
1916
1917 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1918 {
1919         struct udma_dev *ud = uc->ud;
1920         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1921         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1922         struct udma_tchan *tchan = uc->tchan;
1923         int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1924         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1925         u32 mode, fetch_size;
1926         int ret;
1927
1928         if (uc->config.pkt_mode) {
1929                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1930                 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1931                                                    uc->config.psd_size, 0);
1932         } else {
1933                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1934                 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1935         }
1936
1937         req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1938         req_tx.nav_id = tisci_rm->tisci_dev_id;
1939         req_tx.index = tchan->id;
1940         req_tx.tx_chan_type = mode;
1941         req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1942         req_tx.tx_fetch_size = fetch_size >> 2;
1943         req_tx.txcq_qnum = tc_ring;
1944         req_tx.tx_atype = uc->config.atype;
1945         if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1946             ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1947                 /* wait for peer to complete the teardown for PDMAs */
1948                 req_tx.valid_params |=
1949                                 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1950                 req_tx.tx_tdtype = 1;
1951         }
1952
1953         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1954         if (ret)
1955                 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1956
1957         return ret;
1958 }
1959
1960 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
1961 {
1962         struct udma_dev *ud = uc->ud;
1963         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1964         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1965         struct udma_tchan *tchan = uc->tchan;
1966         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1967         int ret;
1968
1969         req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
1970         req_tx.nav_id = tisci_rm->tisci_dev_id;
1971         req_tx.index = tchan->id;
1972         req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1973         if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1974                 /* wait for peer to complete the teardown for PDMAs */
1975                 req_tx.valid_params |=
1976                                 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1977                 req_tx.tx_tdtype = 1;
1978         }
1979
1980         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1981         if (ret)
1982                 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1983
1984         return ret;
1985 }
1986
1987 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
1988
1989 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1990 {
1991         struct udma_dev *ud = uc->ud;
1992         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1993         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1994         struct udma_rchan *rchan = uc->rchan;
1995         int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1996         int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1997         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1998         struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1999         u32 mode, fetch_size;
2000         int ret;
2001
2002         if (uc->config.pkt_mode) {
2003                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
2004                 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
2005                                                    uc->config.psd_size, 0);
2006         } else {
2007                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
2008                 fetch_size = sizeof(struct cppi5_desc_hdr_t);
2009         }
2010
2011         req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
2012         req_rx.nav_id = tisci_rm->tisci_dev_id;
2013         req_rx.index = rchan->id;
2014         req_rx.rx_fetch_size = fetch_size >> 2;
2015         req_rx.rxcq_qnum = rx_ring;
2016         req_rx.rx_chan_type = mode;
2017         req_rx.rx_atype = uc->config.atype;
2018
2019         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2020         if (ret) {
2021                 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2022                 return ret;
2023         }
2024
2025         flow_req.valid_params =
2026                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2027                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2028                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
2029                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
2030                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
2031                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
2032                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
2033                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
2034                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
2035                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
2036                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
2037                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
2038                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
2039
2040         flow_req.nav_id = tisci_rm->tisci_dev_id;
2041         flow_req.flow_index = rchan->id;
2042
2043         if (uc->config.needs_epib)
2044                 flow_req.rx_einfo_present = 1;
2045         else
2046                 flow_req.rx_einfo_present = 0;
2047         if (uc->config.psd_size)
2048                 flow_req.rx_psinfo_present = 1;
2049         else
2050                 flow_req.rx_psinfo_present = 0;
2051         flow_req.rx_error_handling = 1;
2052         flow_req.rx_dest_qnum = rx_ring;
2053         flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2054         flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2055         flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2056         flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2057         flow_req.rx_fdq0_sz0_qnum = fd_ring;
2058         flow_req.rx_fdq1_qnum = fd_ring;
2059         flow_req.rx_fdq2_qnum = fd_ring;
2060         flow_req.rx_fdq3_qnum = fd_ring;
2061
2062         ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2063
2064         if (ret)
2065                 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2066
2067         return ret;
2068 }
2069
2070 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2071 {
2072         struct udma_dev *ud = uc->ud;
2073         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2074         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2075         struct udma_rchan *rchan = uc->rchan;
2076         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2077         int ret;
2078
2079         req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2080         req_rx.nav_id = tisci_rm->tisci_dev_id;
2081         req_rx.index = rchan->id;
2082
2083         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2084         if (ret)
2085                 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2086
2087         return ret;
2088 }
2089
2090 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2091 {
2092         struct udma_dev *ud = uc->ud;
2093         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2094         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2095         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2096         struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2097         int ret;
2098
2099         req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2100         req_rx.nav_id = tisci_rm->tisci_dev_id;
2101         req_rx.index = uc->rchan->id;
2102
2103         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2104         if (ret) {
2105                 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2106                 return ret;
2107         }
2108
2109         flow_req.valid_params =
2110                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2111                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2112                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2113
2114         flow_req.nav_id = tisci_rm->tisci_dev_id;
2115         flow_req.flow_index = uc->rflow->id;
2116
2117         if (uc->config.needs_epib)
2118                 flow_req.rx_einfo_present = 1;
2119         else
2120                 flow_req.rx_einfo_present = 0;
2121         if (uc->config.psd_size)
2122                 flow_req.rx_psinfo_present = 1;
2123         else
2124                 flow_req.rx_psinfo_present = 0;
2125         flow_req.rx_error_handling = 1;
2126
2127         ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2128
2129         if (ret)
2130                 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2131                         ret);
2132
2133         return ret;
2134 }
2135
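/*
 * dmaengine alloc_chan_resources callback for UDMA. The sequence is:
 * reserve the channel(s) and rings for the requested direction, apply
 * the TISCI channel/flow configuration, pair the PSI-L threads, then
 * request the ring interrupt (plus the UDMA TR interrupt for slave TR
 * mode channels). Each error label unwinds exactly the steps that
 * completed before it.
 */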
2136 static int udma_alloc_chan_resources(struct dma_chan *chan)
2137 {
2138         struct udma_chan *uc = to_udma_chan(chan);
2139         struct udma_dev *ud = to_udma_dev(chan->device);
2140         const struct udma_soc_data *soc_data = ud->soc_data;
2141         struct k3_ring *irq_ring;
2142         u32 irq_udma_idx;
2143         int ret;
2144
2145         uc->dma_dev = ud->dev;
2146
2147         if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2148                 uc->use_dma_pool = true;
2149                 /* in case of MEM_TO_MEM we have a maximum of two TRs */
2150                 if (uc->config.dir == DMA_MEM_TO_MEM) {
2151                         uc->config.hdesc_size = cppi5_trdesc_calc_size(
2152                                         sizeof(struct cppi5_tr_type15_t), 2);
2153                         uc->config.pkt_mode = false;
2154                 }
2155         }
2156
2157         if (uc->use_dma_pool) {
2158                 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2159                                                  uc->config.hdesc_size,
2160                                                  ud->desc_align,
2161                                                  0);
2162                 if (!uc->hdesc_pool) {
2163                         dev_err(ud->ddev.dev,
2164                                 "Descriptor pool allocation failed\n");
2165                         uc->use_dma_pool = false;
2166                         ret = -ENOMEM;
2167                         goto err_cleanup;
2168                 }
2169         }
2170
2171         /*
2172          * Make sure that the completion is in a known state:
2173          * No teardown, the channel is idle
2174          */
2175         reinit_completion(&uc->teardown_completed);
2176         complete_all(&uc->teardown_completed);
2177         uc->state = UDMA_CHAN_IS_IDLE;
2178
2179         switch (uc->config.dir) {
2180         case DMA_MEM_TO_MEM:
2181                 /* Non-synchronized - mem to mem type of transfer */
2182                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2183                         uc->id);
2184
2185                 ret = udma_get_chan_pair(uc);
2186                 if (ret)
2187                         goto err_cleanup;
2188
2189                 ret = udma_alloc_tx_resources(uc);
2190                 if (ret) {
2191                         udma_put_rchan(uc);
2192                         goto err_cleanup;
2193                 }
2194
2195                 ret = udma_alloc_rx_resources(uc);
2196                 if (ret) {
2197                         udma_free_tx_resources(uc);
2198                         goto err_cleanup;
2199                 }
2200
2201                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2202                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2203                                         K3_PSIL_DST_THREAD_ID_OFFSET;
2204
2205                 irq_ring = uc->tchan->tc_ring;
2206                 irq_udma_idx = uc->tchan->id;
2207
2208                 ret = udma_tisci_m2m_channel_config(uc);
2209                 break;
2210         case DMA_MEM_TO_DEV:
2211                 /* Slave transfer synchronized - mem to dev (TX) transfer */
2212                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2213                         uc->id);
2214
2215                 ret = udma_alloc_tx_resources(uc);
2216                 if (ret)
2217                         goto err_cleanup;
2218
2219                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2220                 uc->config.dst_thread = uc->config.remote_thread_id;
2221                 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2222
2223                 irq_ring = uc->tchan->tc_ring;
2224                 irq_udma_idx = uc->tchan->id;
2225
2226                 ret = udma_tisci_tx_channel_config(uc);
2227                 break;
2228         case DMA_DEV_TO_MEM:
2229                 /* Slave transfer synchronized - dev to mem (RX) transfer */
2230                 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2231                         uc->id);
2232
2233                 ret = udma_alloc_rx_resources(uc);
2234                 if (ret)
2235                         goto err_cleanup;
2236
2237                 uc->config.src_thread = uc->config.remote_thread_id;
2238                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2239                                         K3_PSIL_DST_THREAD_ID_OFFSET;
2240
2241                 irq_ring = uc->rflow->r_ring;
2242                 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2243
2244                 ret = udma_tisci_rx_channel_config(uc);
2245                 break;
2246         default:
2247                 /* Cannot happen */
2248                 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2249                         __func__, uc->id, uc->config.dir);
2250                 ret = -EINVAL;
2251                 goto err_cleanup;
2252
2253         }
2254
2255         /* check if the channel configuration was successful */
2256         if (ret)
2257                 goto err_res_free;
2258
2259         if (udma_is_chan_running(uc)) {
2260                 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2261                 udma_reset_chan(uc, false);
2262                 if (udma_is_chan_running(uc)) {
2263                         dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2264                         ret = -EBUSY;
2265                         goto err_res_free;
2266                 }
2267         }
2268
2269         /* PSI-L pairing */
2270         ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2271         if (ret) {
2272                 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2273                         uc->config.src_thread, uc->config.dst_thread);
2274                 goto err_res_free;
2275         }
2276
2277         uc->psil_paired = true;
2278
2279         uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2280         if (uc->irq_num_ring <= 0) {
2281                 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2282                         k3_ringacc_get_ring_id(irq_ring));
2283                 ret = -EINVAL;
2284                 goto err_psi_free;
2285         }
2286
2287         ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2288                           IRQF_TRIGGER_HIGH, uc->name, uc);
2289         if (ret) {
2290                 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2291                 goto err_irq_free;
2292         }
2293
2294         /* Event from UDMA (TR events) only needed for slave TR mode channels */
2295         if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2296                 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
2297                                                             irq_udma_idx);
2298                 if (uc->irq_num_udma <= 0) {
2299                         dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2300                                 irq_udma_idx);
2301                         free_irq(uc->irq_num_ring, uc);
2302                         ret = -EINVAL;
2303                         goto err_irq_free;
2304                 }
2305
2306                 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2307                                   uc->name, uc);
2308                 if (ret) {
2309                         dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2310                                 uc->id);
2311                         free_irq(uc->irq_num_ring, uc);
2312                         goto err_irq_free;
2313                 }
2314         } else {
2315                 uc->irq_num_udma = 0;
2316         }
2317
2318         udma_reset_rings(uc);
2319
2320         return 0;
2321
2322 err_irq_free:
2323         uc->irq_num_ring = 0;
2324         uc->irq_num_udma = 0;
2325 err_psi_free:
2326         navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2327         uc->psil_paired = false;
2328 err_res_free:
2329         udma_free_tx_resources(uc);
2330         udma_free_rx_resources(uc);
2331 err_cleanup:
2332         udma_reset_uchan(uc);
2333
2334         if (uc->use_dma_pool) {
2335                 dma_pool_destroy(uc->hdesc_pool);
2336                 uc->use_dma_pool = false;
2337         }
2338
2339         return ret;
2340 }
2341
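/*
 * BCDMA variant of alloc_chan_resources: MEM_TO_MEM uses a bchan (with a
 * dma_pool for the two-TR descriptors when no external trigger is used),
 * while the slave directions use tchan/rchan resources and need PSI-L
 * thread pairing.
 */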
2342 static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2343 {
2344         struct udma_chan *uc = to_udma_chan(chan);
2345         struct udma_dev *ud = to_udma_dev(chan->device);
2346         const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2347         u32 irq_udma_idx, irq_ring_idx;
2348         int ret;
2349
2350         /* Only TR mode is supported */
2351         uc->config.pkt_mode = false;
2352
2353         /*
2354          * Make sure that the completion is in a known state:
2355          * No teardown, the channel is idle
2356          */
2357         reinit_completion(&uc->teardown_completed);
2358         complete_all(&uc->teardown_completed);
2359         uc->state = UDMA_CHAN_IS_IDLE;
2360
2361         switch (uc->config.dir) {
2362         case DMA_MEM_TO_MEM:
2363                 /* Non-synchronized - mem to mem type of transfer */
2364                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2365                         uc->id);
2366
2367                 ret = bcdma_alloc_bchan_resources(uc);
2368                 if (ret)
2369                         return ret;
2370
2371                 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2372                 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2373
2374                 ret = bcdma_tisci_m2m_channel_config(uc);
2375                 break;
2376         case DMA_MEM_TO_DEV:
2377                 /* Slave transfer synchronized - mem to dev (TX) transfer */
2378                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2379                         uc->id);
2380
2381                 ret = udma_alloc_tx_resources(uc);
2382                 if (ret) {
2383                         uc->config.remote_thread_id = -1;
2384                         return ret;
2385                 }
2386
2387                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2388                 uc->config.dst_thread = uc->config.remote_thread_id;
2389                 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2390
2391                 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2392                 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2393
2394                 ret = bcdma_tisci_tx_channel_config(uc);
2395                 break;
2396         case DMA_DEV_TO_MEM:
2397                 /* Slave transfer synchronized - dev to mem (RX) transfer */
2398                 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2399                         uc->id);
2400
2401                 ret = udma_alloc_rx_resources(uc);
2402                 if (ret) {
2403                         uc->config.remote_thread_id = -1;
2404                         return ret;
2405                 }
2406
2407                 uc->config.src_thread = uc->config.remote_thread_id;
2408                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2409                                         K3_PSIL_DST_THREAD_ID_OFFSET;
2410
2411                 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2412                 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2413
2414                 ret = bcdma_tisci_rx_channel_config(uc);
2415                 break;
2416         default:
2417                 /* Cannot happen */
2418                 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2419                         __func__, uc->id, uc->config.dir);
2420                 return -EINVAL;
2421         }
2422
2423         /* check if the channel configuration was successful */
2424         if (ret)
2425                 goto err_res_free;
2426
2427         if (udma_is_chan_running(uc)) {
2428                 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2429                 udma_reset_chan(uc, false);
2430                 if (udma_is_chan_running(uc)) {
2431                         dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2432                         ret = -EBUSY;
2433                         goto err_res_free;
2434                 }
2435         }
2436
2437         uc->dma_dev = dmaengine_get_dma_device(chan);
2438         if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
2439                 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2440                                         sizeof(struct cppi5_tr_type15_t), 2);
2441
2442                 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2443                                                  uc->config.hdesc_size,
2444                                                  ud->desc_align,
2445                                                  0);
2446                 if (!uc->hdesc_pool) {
2447                         dev_err(ud->ddev.dev,
2448                                 "Descriptor pool allocation failed\n");
2449                         uc->use_dma_pool = false;
2450                         ret = -ENOMEM;
2451                         goto err_res_free;
2452                 }
2453
2454                 uc->use_dma_pool = true;
2455         } else if (uc->config.dir != DMA_MEM_TO_MEM) {
2456                 /* PSI-L pairing */
2457                 ret = navss_psil_pair(ud, uc->config.src_thread,
2458                                       uc->config.dst_thread);
2459                 if (ret) {
2460                         dev_err(ud->dev,
2461                                 "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2462                                 uc->config.src_thread, uc->config.dst_thread);
2463                         goto err_res_free;
2464                 }
2465
2466                 uc->psil_paired = true;
2467         }
2468
2469         uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
2470         if (uc->irq_num_ring <= 0) {
2471                 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2472                         irq_ring_idx);
2473                 ret = -EINVAL;
2474                 goto err_psi_free;
2475         }
2476
2477         ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2478                           IRQF_TRIGGER_HIGH, uc->name, uc);
2479         if (ret) {
2480                 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2481                 goto err_irq_free;
2482         }
2483
2484         /* Event from BCDMA (TR events) only needed for slave channels */
2485         if (is_slave_direction(uc->config.dir)) {
2486                 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
2487                                                             irq_udma_idx);
2488                 if (uc->irq_num_udma <= 0) {
2489                         dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2490                                 irq_udma_idx);
2491                         free_irq(uc->irq_num_ring, uc);
2492                         ret = -EINVAL;
2493                         goto err_irq_free;
2494                 }
2495
2496                 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2497                                   uc->name, uc);
2498                 if (ret) {
2499                         dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2500                                 uc->id);
2501                         free_irq(uc->irq_num_ring, uc);
2502                         goto err_irq_free;
2503                 }
2504         } else {
2505                 uc->irq_num_udma = 0;
2506         }
2507
2508         udma_reset_rings(uc);
2509
2510         INIT_DELAYED_WORK(&uc->tx_drain.work,
2511                           udma_check_tx_completion);
2512         return 0;
2513
2514 err_irq_free:
2515         uc->irq_num_ring = 0;
2516         uc->irq_num_udma = 0;
2517 err_psi_free:
2518         if (uc->psil_paired)
2519                 navss_psil_unpair(ud, uc->config.src_thread,
2520                                   uc->config.dst_thread);
2521         uc->psil_paired = false;
2522 err_res_free:
2523         bcdma_free_bchan_resources(uc);
2524         udma_free_tx_resources(uc);
2525         udma_free_rx_resources(uc);
2526
2527         udma_reset_uchan(uc);
2528
2529         if (uc->use_dma_pool) {
2530                 dma_pool_destroy(uc->hdesc_pool);
2531                 uc->use_dma_pool = false;
2532         }
2533
2534         return ret;
2535 }
2536
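/*
 * Route an event router output to this channel's trigger input. Each
 * bchan exposes two global triggers, hence the bchan id * 2 stride plus
 * the zero-based trigger selected by tr_trigger_type (1 or 2).
 */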
2537 static int bcdma_router_config(struct dma_chan *chan)
2538 {
2539         struct k3_event_route_data *router_data = chan->route_data;
2540         struct udma_chan *uc = to_udma_chan(chan);
2541         u32 trigger_event;
2542
2543         if (!uc->bchan)
2544                 return -EINVAL;
2545
2546         if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2547                 return -EINVAL;
2548
2549         trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2550         trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2551
2552         return router_data->set_event(router_data->priv, trigger_event);
2553 }
2554
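/*
 * PKTDMA variant of alloc_chan_resources: packet mode only, descriptors
 * always come from a dma_pool, and only the ring (flow) interrupt is
 * used - there is no separate TR event interrupt.
 */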
2555 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2556 {
2557         struct udma_chan *uc = to_udma_chan(chan);
2558         struct udma_dev *ud = to_udma_dev(chan->device);
2559         const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2560         u32 irq_ring_idx;
2561         int ret;
2562
2563         /*
2564          * Make sure that the completion is in a known state:
2565          * No teardown, the channel is idle
2566          */
2567         reinit_completion(&uc->teardown_completed);
2568         complete_all(&uc->teardown_completed);
2569         uc->state = UDMA_CHAN_IS_IDLE;
2570
2571         switch (uc->config.dir) {
2572         case DMA_MEM_TO_DEV:
2573                 /* Slave transfer synchronized - mem to dev (TX) transfer */
2574                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2575                         uc->id);
2576
2577                 ret = udma_alloc_tx_resources(uc);
2578                 if (ret) {
2579                         uc->config.remote_thread_id = -1;
2580                         return ret;
2581                 }
2582
2583                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2584                 uc->config.dst_thread = uc->config.remote_thread_id;
2585                 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2586
2587                 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2588
2589                 ret = pktdma_tisci_tx_channel_config(uc);
2590                 break;
2591         case DMA_DEV_TO_MEM:
2592                 /* Slave transfer synchronized - dev to mem (RX) transfer */
2593                 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2594                         uc->id);
2595
2596                 ret = udma_alloc_rx_resources(uc);
2597                 if (ret) {
2598                         uc->config.remote_thread_id = -1;
2599                         return ret;
2600                 }
2601
2602                 uc->config.src_thread = uc->config.remote_thread_id;
2603                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2604                                         K3_PSIL_DST_THREAD_ID_OFFSET;
2605
2606                 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2607
2608                 ret = pktdma_tisci_rx_channel_config(uc);
2609                 break;
2610         default:
2611                 /* Cannot happen */
2612                 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2613                         __func__, uc->id, uc->config.dir);
2614                 return -EINVAL;
2615         }
2616
2617         /* check if the channel configuration was successful */
2618         if (ret)
2619                 goto err_res_free;
2620
2621         if (udma_is_chan_running(uc)) {
2622                 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2623                 udma_reset_chan(uc, false);
2624                 if (udma_is_chan_running(uc)) {
2625                         dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2626                         ret = -EBUSY;
2627                         goto err_res_free;
2628                 }
2629         }
2630
2631         uc->dma_dev = dmaengine_get_dma_device(chan);
2632         uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2633                                          uc->config.hdesc_size, ud->desc_align,
2634                                          0);
2635         if (!uc->hdesc_pool) {
2636                 dev_err(ud->ddev.dev,
2637                         "Descriptor pool allocation failed\n");
2638                 uc->use_dma_pool = false;
2639                 ret = -ENOMEM;
2640                 goto err_res_free;
2641         }
2642
2643         uc->use_dma_pool = true;
2644
2645         /* PSI-L pairing */
2646         ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2647         if (ret) {
2648                 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2649                         uc->config.src_thread, uc->config.dst_thread);
2650                 goto err_res_free;
2651         }
2652
2653         uc->psil_paired = true;
2654
2655         uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
2656         if (uc->irq_num_ring <= 0) {
2657                 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2658                         irq_ring_idx);
2659                 ret = -EINVAL;
2660                 goto err_psi_free;
2661         }
2662
2663         ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2664                           IRQF_TRIGGER_HIGH, uc->name, uc);
2665         if (ret) {
2666                 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2667                 goto err_irq_free;
2668         }
2669
2670         uc->irq_num_udma = 0;
2671
2672         udma_reset_rings(uc);
2673
2674         INIT_DELAYED_WORK(&uc->tx_drain.work,
2675                           udma_check_tx_completion);
2676
2677         if (uc->tchan)
2678                 dev_dbg(ud->dev,
2679                         "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2680                         uc->id, uc->tchan->id, uc->tchan->tflow_id,
2681                         uc->config.remote_thread_id);
2682         else if (uc->rchan)
2683                 dev_dbg(ud->dev,
2684                         "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2685                         uc->id, uc->rchan->id, uc->rflow->id,
2686                         uc->config.remote_thread_id);
2687         return 0;
2688
2689 err_irq_free:
2690         uc->irq_num_ring = 0;
2691 err_psi_free:
2692         navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2693         uc->psil_paired = false;
2694 err_res_free:
2695         udma_free_tx_resources(uc);
2696         udma_free_rx_resources(uc);
2697
2698         udma_reset_uchan(uc);
2699
2700         dma_pool_destroy(uc->hdesc_pool);
2701         uc->use_dma_pool = false;
2702
2703         return ret;
2704 }
2705
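/*
 * The slave configuration is only stashed here; the prep callbacks
 * consume it later. A minimal sketch of a hypothetical dmaengine client
 * driving a TX channel (the device name and FIFO address are made up
 * for illustration):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_chan *dchan = dma_request_chan(dev, "tx");
 *
 *	if (!IS_ERR(dchan))
 *		dmaengine_slave_config(dchan, &cfg);
 */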
2706 static int udma_slave_config(struct dma_chan *chan,
2707                              struct dma_slave_config *cfg)
2708 {
2709         struct udma_chan *uc = to_udma_chan(chan);
2710
2711         memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2712
2713         return 0;
2714 }
2715
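/*
 * Allocate a single CPPI5 TR descriptor sized for @tr_count records: the
 * first tr_size bytes hold the descriptor header, followed by the TR
 * request records and then the TR response array. Cyclic channels get an
 * infinite reload count so the descriptor keeps re-executing.
 */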
2716 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2717                                             size_t tr_size, int tr_count,
2718                                             enum dma_transfer_direction dir)
2719 {
2720         struct udma_hwdesc *hwdesc;
2721         struct cppi5_desc_hdr_t *tr_desc;
2722         struct udma_desc *d;
2723         u32 reload_count = 0;
2724         u32 ring_id;
2725
2726         switch (tr_size) {
2727         case 16:
2728         case 32:
2729         case 64:
2730         case 128:
2731                 break;
2732         default:
2733                 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2734                 return NULL;
2735         }
2736
2737         /* We have only one descriptor containing multiple TRs */
2738         d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2739         if (!d)
2740                 return NULL;
2741
2742         d->sglen = tr_count;
2743
2744         d->hwdesc_count = 1;
2745         hwdesc = &d->hwdesc[0];
2746
2747         /* Allocate memory for DMA ring descriptor */
2748         if (uc->use_dma_pool) {
2749                 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2750                 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2751                                                 GFP_NOWAIT,
2752                                                 &hwdesc->cppi5_desc_paddr);
2753         } else {
2754                 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2755                                                                  tr_count);
2756                 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2757                                                 uc->ud->desc_align);
2758                 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2759                                                 hwdesc->cppi5_desc_size,
2760                                                 &hwdesc->cppi5_desc_paddr,
2761                                                 GFP_NOWAIT);
2762         }
2763
2764         if (!hwdesc->cppi5_desc_vaddr) {
2765                 kfree(d);
2766                 return NULL;
2767         }
2768
2769         /* Start of the TR req records */
2770         hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2771         /* Start address of the TR response array */
2772         hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2773
2774         tr_desc = hwdesc->cppi5_desc_vaddr;
2775
2776         if (uc->cyclic)
2777                 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2778
2779         if (dir == DMA_DEV_TO_MEM)
2780                 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2781         else
2782                 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2783
2784         cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2785         cppi5_desc_set_pktids(tr_desc, uc->id,
2786                               CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2787         cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2788
2789         return d;
2790 }
2791
2792 /**
2793  * udma_get_tr_counters - calculate TR counters for a given length
2794  * @len: Length of the transfer
2795  * @align_to: Preferred alignment
2796  * @tr0_cnt0: First TR icnt0
2797  * @tr0_cnt1: First TR icnt1
2798  * @tr1_cnt0: Second (if used) TR icnt0
2799  *
2800  * For len < SZ_64K only one TR is enough; tr1_cnt0 is not updated
2801  * For len >= SZ_64K two TRs are used in a simple way:
2802  * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2803  * Second TR: the remaining length (tr1_cnt0)
2804  *
2805  * Returns the number of TRs the length needs (1 or 2), or
2806  * -EINVAL if the length cannot be supported
2807  */
2808 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2809                                 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2810 {
2811         if (len < SZ_64K) {
2812                 *tr0_cnt0 = len;
2813                 *tr0_cnt1 = 1;
2814
2815                 return 1;
2816         }
2817
2818         if (align_to > 3)
2819                 align_to = 3;
2820
2821 realign:
2822         *tr0_cnt0 = SZ_64K - BIT(align_to);
2823         if (len / *tr0_cnt0 >= SZ_64K) {
2824                 if (align_to) {
2825                         align_to--;
2826                         goto realign;
2827                 }
2828                 return -EINVAL;
2829         }
2830
2831         *tr0_cnt1 = len / *tr0_cnt0;
2832         *tr1_cnt0 = len % *tr0_cnt0;
2833
2834         return 2;
2835 }
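/*
 * Worked example for the above (numbers chosen for illustration):
 * len = 196608, align_to = 3 gives tr0_cnt0 = SZ_64K - BIT(3) = 65528.
 * 196608 / 65528 = 3, which is below SZ_64K, so no realignment is
 * needed: tr0_cnt1 = 3 and tr1_cnt0 = 196608 - 3 * 65528 = 24. The first
 * TR then moves three 65528 byte rows and the second TR the 24 byte tail.
 */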
2836
2837 static struct udma_desc *
2838 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2839                       unsigned int sglen, enum dma_transfer_direction dir,
2840                       unsigned long tx_flags, void *context)
2841 {
2842         struct scatterlist *sgent;
2843         struct udma_desc *d;
2844         struct cppi5_tr_type1_t *tr_req = NULL;
2845         u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2846         unsigned int i;
2847         size_t tr_size;
2848         int num_tr = 0;
2849         int tr_idx = 0;
2850         u64 asel;
2851
2852         /* estimate the number of TRs we will need */
2853         for_each_sg(sgl, sgent, sglen, i) {
2854                 if (sg_dma_len(sgent) < SZ_64K)
2855                         num_tr++;
2856                 else
2857                         num_tr += 2;
2858         }
2859
2860         /* Now allocate and set up the descriptor. */
2861         tr_size = sizeof(struct cppi5_tr_type1_t);
2862         d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2863         if (!d)
2864                 return NULL;
2865
2866         d->sglen = sglen;
2867
2868         if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2869                 asel = 0;
2870         else
2871                 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2872
2873         tr_req = d->hwdesc[0].tr_req_base;
2874         for_each_sg(sgl, sgent, sglen, i) {
2875                 dma_addr_t sg_addr = sg_dma_address(sgent);
2876
2877                 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2878                                               &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2879                 if (num_tr < 0) {
2880                         dev_err(uc->ud->dev, "size %u is not supported\n",
2881                                 sg_dma_len(sgent));
2882                         udma_free_hwdesc(uc, d);
2883                         kfree(d);
2884                         return NULL;
2885                 }
2886
2887                 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2888                               false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2889                 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2890
2891                 sg_addr |= asel;
2892                 tr_req[tr_idx].addr = sg_addr;
2893                 tr_req[tr_idx].icnt0 = tr0_cnt0;
2894                 tr_req[tr_idx].icnt1 = tr0_cnt1;
2895                 tr_req[tr_idx].dim1 = tr0_cnt0;
2896                 tr_idx++;
2897
2898                 if (num_tr == 2) {
2899                         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2900                                       false, false,
2901                                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2902                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2903                                          CPPI5_TR_CSF_SUPR_EVT);
2904
2905                         tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2906                         tr_req[tr_idx].icnt0 = tr1_cnt0;
2907                         tr_req[tr_idx].icnt1 = 1;
2908                         tr_req[tr_idx].dim1 = tr1_cnt0;
2909                         tr_idx++;
2910                 }
2911
2912                 d->residue += sg_dma_len(sgent);
2913         }
2914
2915         cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2916                          CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2917
2918         return d;
2919 }
2920
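/*
 * Triggered TR variant used for channels with an external trigger: each
 * SG entry must be a whole number of trigger_size units, where
 * trigger_size = dev_width * burst (or dev_width * port_window when a
 * port window is set), and each trigger event moves one such unit.
 */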
2921 static struct udma_desc *
2922 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2923                                 unsigned int sglen,
2924                                 enum dma_transfer_direction dir,
2925                                 unsigned long tx_flags, void *context)
2926 {
2927         struct scatterlist *sgent;
2928         struct cppi5_tr_type15_t *tr_req = NULL;
2929         enum dma_slave_buswidth dev_width;
2930         u16 tr_cnt0, tr_cnt1;
2931         dma_addr_t dev_addr;
2932         struct udma_desc *d;
2933         unsigned int i;
2934         size_t tr_size, sg_len;
2935         int num_tr = 0;
2936         int tr_idx = 0;
2937         u32 burst, trigger_size, port_window;
2938         u64 asel;
2939
2940         if (dir == DMA_DEV_TO_MEM) {
2941                 dev_addr = uc->cfg.src_addr;
2942                 dev_width = uc->cfg.src_addr_width;
2943                 burst = uc->cfg.src_maxburst;
2944                 port_window = uc->cfg.src_port_window_size;
2945         } else if (dir == DMA_MEM_TO_DEV) {
2946                 dev_addr = uc->cfg.dst_addr;
2947                 dev_width = uc->cfg.dst_addr_width;
2948                 burst = uc->cfg.dst_maxburst;
2949                 port_window = uc->cfg.dst_port_window_size;
2950         } else {
2951                 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2952                 return NULL;
2953         }
2954
2955         if (!burst)
2956                 burst = 1;
2957
2958         if (port_window) {
2959                 if (port_window != burst) {
2960                         dev_err(uc->ud->dev,
2961                                 "The burst must be equal to port_window\n");
2962                         return NULL;
2963                 }
2964
2965                 tr_cnt0 = dev_width * port_window;
2966                 tr_cnt1 = 1;
2967         } else {
2968                 tr_cnt0 = dev_width;
2969                 tr_cnt1 = burst;
2970         }
2971         trigger_size = tr_cnt0 * tr_cnt1;
2972
2973         /* estimate the number of TRs we will need */
2974         for_each_sg(sgl, sgent, sglen, i) {
2975                 sg_len = sg_dma_len(sgent);
2976
2977                 if (sg_len % trigger_size) {
2978                         dev_err(uc->ud->dev,
2979                                 "Misaligned SG entry (%zu for %u)\n", sg_len,
2980                                 trigger_size);
2981                         return NULL;
2982                 }
2983
2984                 if (sg_len / trigger_size < SZ_64K)
2985                         num_tr++;
2986                 else
2987                         num_tr += 2;
2988         }
2989
2990         /* Now allocate and set up the descriptor. */
2991         tr_size = sizeof(struct cppi5_tr_type15_t);
2992         d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2993         if (!d)
2994                 return NULL;
2995
2996         d->sglen = sglen;
2997
2998         if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
2999                 asel = 0;
3000         } else {
3001                 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3002                 dev_addr |= asel;
3003         }
3004
3005         tr_req = d->hwdesc[0].tr_req_base;
3006         for_each_sg(sgl, sgent, sglen, i) {
3007                 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3008                 dma_addr_t sg_addr = sg_dma_address(sgent);
3009
3010                 sg_len = sg_dma_len(sgent);
3011                 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3012                                               &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3013                 if (num_tr < 0) {
3014                         dev_err(uc->ud->dev, "size %zu is not supported\n",
3015                                 sg_len);
3016                         udma_free_hwdesc(uc, d);
3017                         kfree(d);
3018                         return NULL;
3019                 }
3020
3021                 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3022                               true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3023                 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
3024                 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3025                                      uc->config.tr_trigger_type,
3026                                      CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3027
3028                 sg_addr |= asel;
3029                 if (dir == DMA_DEV_TO_MEM) {
3030                         tr_req[tr_idx].addr = dev_addr;
3031                         tr_req[tr_idx].icnt0 = tr_cnt0;
3032                         tr_req[tr_idx].icnt1 = tr_cnt1;
3033                         tr_req[tr_idx].icnt2 = tr0_cnt2;
3034                         tr_req[tr_idx].icnt3 = tr0_cnt3;
3035                         tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3036
3037                         tr_req[tr_idx].daddr = sg_addr;
3038                         tr_req[tr_idx].dicnt0 = tr_cnt0;
3039                         tr_req[tr_idx].dicnt1 = tr_cnt1;
3040                         tr_req[tr_idx].dicnt2 = tr0_cnt2;
3041                         tr_req[tr_idx].dicnt3 = tr0_cnt3;
3042                         tr_req[tr_idx].ddim1 = tr_cnt0;
3043                         tr_req[tr_idx].ddim2 = trigger_size;
3044                         tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
3045                 } else {
3046                         tr_req[tr_idx].addr = sg_addr;
3047                         tr_req[tr_idx].icnt0 = tr_cnt0;
3048                         tr_req[tr_idx].icnt1 = tr_cnt1;
3049                         tr_req[tr_idx].icnt2 = tr0_cnt2;
3050                         tr_req[tr_idx].icnt3 = tr0_cnt3;
3051                         tr_req[tr_idx].dim1 = tr_cnt0;
3052                         tr_req[tr_idx].dim2 = trigger_size;
3053                         tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3054
3055                         tr_req[tr_idx].daddr = dev_addr;
3056                         tr_req[tr_idx].dicnt0 = tr_cnt0;
3057                         tr_req[tr_idx].dicnt1 = tr_cnt1;
3058                         tr_req[tr_idx].dicnt2 = tr0_cnt2;
3059                         tr_req[tr_idx].dicnt3 = tr0_cnt3;
3060                         tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3061                 }
3062
3063                 tr_idx++;
3064
3065                 if (num_tr == 2) {
3066                         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3067                                       false, true,
3068                                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3069                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3070                                          CPPI5_TR_CSF_SUPR_EVT);
3071                         cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3072                                              uc->config.tr_trigger_type,
3073                                              CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3074                                              0, 0);
3075
3076                         sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3077                         if (dir == DMA_DEV_TO_MEM) {
3078                                 tr_req[tr_idx].addr = dev_addr;
3079                                 tr_req[tr_idx].icnt0 = tr_cnt0;
3080                                 tr_req[tr_idx].icnt1 = tr_cnt1;
3081                                 tr_req[tr_idx].icnt2 = tr1_cnt2;
3082                                 tr_req[tr_idx].icnt3 = 1;
3083                                 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3084
3085                                 tr_req[tr_idx].daddr = sg_addr;
3086                                 tr_req[tr_idx].dicnt0 = tr_cnt0;
3087                                 tr_req[tr_idx].dicnt1 = tr_cnt1;
3088                                 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3089                                 tr_req[tr_idx].dicnt3 = 1;
3090                                 tr_req[tr_idx].ddim1 = tr_cnt0;
3091                                 tr_req[tr_idx].ddim2 = trigger_size;
3092                         } else {
3093                                 tr_req[tr_idx].addr = sg_addr;
3094                                 tr_req[tr_idx].icnt0 = tr_cnt0;
3095                                 tr_req[tr_idx].icnt1 = tr_cnt1;
3096                                 tr_req[tr_idx].icnt2 = tr1_cnt2;
3097                                 tr_req[tr_idx].icnt3 = 1;
3098                                 tr_req[tr_idx].dim1 = tr_cnt0;
3099                                 tr_req[tr_idx].dim2 = trigger_size;
3100
3101                                 tr_req[tr_idx].daddr = dev_addr;
3102                                 tr_req[tr_idx].dicnt0 = tr_cnt0;
3103                                 tr_req[tr_idx].dicnt1 = tr_cnt1;
3104                                 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3105                                 tr_req[tr_idx].dicnt3 = 1;
3106                                 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3107                         }
3108                         tr_idx++;
3109                 }
3110
3111                 d->residue += sg_len;
3112         }
3113
3114         cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
3115                          CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3116
3117         return d;
3118 }
3119
3120 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3121                                    enum dma_slave_buswidth dev_width,
3122                                    u16 elcnt)
3123 {
3124         if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3125                 return 0;
3126
3127         /* Bus width translates to the element size (ES) */
3128         switch (dev_width) {
3129         case DMA_SLAVE_BUSWIDTH_1_BYTE:
3130                 d->static_tr.elsize = 0;
3131                 break;
3132         case DMA_SLAVE_BUSWIDTH_2_BYTES:
3133                 d->static_tr.elsize = 1;
3134                 break;
3135         case DMA_SLAVE_BUSWIDTH_3_BYTES:
3136                 d->static_tr.elsize = 2;
3137                 break;
3138         case DMA_SLAVE_BUSWIDTH_4_BYTES:
3139                 d->static_tr.elsize = 3;
3140                 break;
3141         case DMA_SLAVE_BUSWIDTH_8_BYTES:
3142                 d->static_tr.elsize = 4;
3143                 break;
3144         default: /* not reached */
3145                 return -EINVAL;
3146         }
3147
3148         d->static_tr.elcnt = elcnt;
3149
3150         /*
3151          * PDMA must close the packet when the channel is in packet mode.
3152          * For TR mode, when the channel is not cyclic, we also need PDMA to
3153          * close the packet; otherwise the transfer will stall because PDMA
3154          * holds on to the data it has received from the peripheral.
3155          */
3156         if (uc->config.pkt_mode || !uc->cyclic) {
3157                 unsigned int div = dev_width * elcnt;
3158
3159                 if (uc->cyclic)
3160                         d->static_tr.bstcnt = d->residue / d->sglen / div;
3161                 else
3162                         d->static_tr.bstcnt = d->residue / div;
3163
3164                 if (uc->config.dir == DMA_DEV_TO_MEM &&
3165                     d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3166                         return -EINVAL;
3167         } else {
3168                 d->static_tr.bstcnt = 0;
3169         }
3170
3171         return 0;
3172 }
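
/*
 * Worked example (illustrative only): a non-cyclic DEV_TO_MEM transfer of
 * 4096 bytes on a PDMA endpoint configured for a 4-byte bus width and a
 * maxburst of 8 resolves to:
 *
 *	d->static_tr.elsize = 3;		// 4 bytes per element
 *	d->static_tr.elcnt  = 8;		// elements per burst
 *	d->static_tr.bstcnt = 4096 / (4 * 8);	// 128 bursts close the packet
 *
 * which comfortably fits the 12-bit StaticTR Z mask (4095) used on AM654
 * class devices.
 */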
3173
3174 static struct udma_desc *
3175 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3176                        unsigned int sglen, enum dma_transfer_direction dir,
3177                        unsigned long tx_flags, void *context)
3178 {
3179         struct scatterlist *sgent;
3180         struct cppi5_host_desc_t *h_desc = NULL;
3181         struct udma_desc *d;
3182         u32 ring_id;
3183         unsigned int i;
3184         u64 asel;
3185
3186         d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3187         if (!d)
3188                 return NULL;
3189
3190         d->sglen = sglen;
3191         d->hwdesc_count = sglen;
3192
3193         if (dir == DMA_DEV_TO_MEM)
3194                 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3195         else
3196                 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3197
3198         if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3199                 asel = 0;
3200         else
3201                 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3202
3203         for_each_sg(sgl, sgent, sglen, i) {
3204                 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3205                 dma_addr_t sg_addr = sg_dma_address(sgent);
3206                 struct cppi5_host_desc_t *desc;
3207                 size_t sg_len = sg_dma_len(sgent);
3208
3209                 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3210                                                 GFP_NOWAIT,
3211                                                 &hwdesc->cppi5_desc_paddr);
3212                 if (!hwdesc->cppi5_desc_vaddr) {
3213                         dev_err(uc->ud->dev,
3214                                 "descriptor%d allocation failed\n", i);
3215
3216                         udma_free_hwdesc(uc, d);
3217                         kfree(d);
3218                         return NULL;
3219                 }
3220
3221                 d->residue += sg_len;
3222                 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3223                 desc = hwdesc->cppi5_desc_vaddr;
3224
3225                 if (i == 0) {
3226                         cppi5_hdesc_init(desc, 0, 0);
3227                         /* Flow and Packet ID */
3228                         cppi5_desc_set_pktids(&desc->hdr, uc->id,
3229                                               CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3230                         cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3231                 } else {
3232                         cppi5_hdesc_reset_hbdesc(desc);
3233                         cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3234                 }
3235
3236                 /* attach the sg buffer to the descriptor */
3237                 sg_addr |= asel;
3238                 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3239
3240                 /* Link into the packet chain as a host buffer descriptor */
3241                 if (h_desc)
3242                         cppi5_hdesc_link_hbdesc(h_desc,
3243                                                 hwdesc->cppi5_desc_paddr | asel);
3244
3245                 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3246                     dir == DMA_MEM_TO_DEV)
3247                         h_desc = desc;
3248         }
3249
3250         if (d->residue >= SZ_4M) {
3251                 dev_err(uc->ud->dev,
3252                         "%s: Transfer size %u is over the supported 4M range\n",
3253                         __func__, d->residue);
3254                 udma_free_hwdesc(uc, d);
3255                 kfree(d);
3256                 return NULL;
3257         }
3258
3259         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3260         cppi5_hdesc_set_pktlen(h_desc, d->residue);
3261
3262         return d;
3263 }
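
/*
 * Resulting layout (illustrative): for a three-entry scatterlist in the
 * MEM_TO_DEV direction the loop above produces one host descriptor per SG
 * entry and chains them:
 *
 *	hwdesc[0]  packet header: pktlen, flow/packet IDs, return ring
 *	  `-> hwdesc[1]  linked host buffer descriptor
 *	        `-> hwdesc[2]  linked host buffer descriptor
 *
 * Only the first descriptor carries the packet-level metadata, and the
 * total packet length is capped at 4M as checked above.
 */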
3264
3265 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3266                                 void *data, size_t len)
3267 {
3268         struct udma_desc *d = to_udma_desc(desc);
3269         struct udma_chan *uc = to_udma_chan(desc->chan);
3270         struct cppi5_host_desc_t *h_desc;
3271         u32 psd_size = len;
3272         u32 flags = 0;
3273
3274         if (!uc->config.pkt_mode || !uc->config.metadata_size)
3275                 return -ENOTSUPP;
3276
3277         if (!data || len > uc->config.metadata_size)
3278                 return -EINVAL;
3279
3280         if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3281                 return -EINVAL;
3282
3283         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3284         if (d->dir == DMA_MEM_TO_DEV)
3285                 memcpy(h_desc->epib, data, len);
3286
3287         if (uc->config.needs_epib)
3288                 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3289
3290         d->metadata = data;
3291         d->metadata_size = len;
3292         if (uc->config.needs_epib)
3293                 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3294
3295         cppi5_hdesc_update_flags(h_desc, flags);
3296         cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3297
3298         return 0;
3299 }
3300
3301 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3302                                    size_t *payload_len, size_t *max_len)
3303 {
3304         struct udma_desc *d = to_udma_desc(desc);
3305         struct udma_chan *uc = to_udma_chan(desc->chan);
3306         struct cppi5_host_desc_t *h_desc;
3307
3308         if (!uc->config.pkt_mode || !uc->config.metadata_size)
3309                 return ERR_PTR(-ENOTSUPP);
3310
3311         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3312
3313         *max_len = uc->config.metadata_size;
3314
3315         *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3316                        CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3317         *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3318
3319         return h_desc->epib;
3320 }
3321
3322 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3323                                  size_t payload_len)
3324 {
3325         struct udma_desc *d = to_udma_desc(desc);
3326         struct udma_chan *uc = to_udma_chan(desc->chan);
3327         struct cppi5_host_desc_t *h_desc;
3328         u32 psd_size = payload_len;
3329         u32 flags = 0;
3330
3331         if (!uc->config.pkt_mode || !uc->config.metadata_size)
3332                 return -ENOTSUPP;
3333
3334         if (payload_len > uc->config.metadata_size)
3335                 return -EINVAL;
3336
3337         if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3338                 return -EINVAL;
3339
3340         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3341
3342         if (uc->config.needs_epib) {
3343                 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3344                 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3345         }
3346
3347         cppi5_hdesc_update_flags(h_desc, flags);
3348         cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3349
3350         return 0;
3351 }
3352
3353 static struct dma_descriptor_metadata_ops metadata_ops = {
3354         .attach = udma_attach_metadata,
3355         .get_ptr = udma_get_metadata_ptr,
3356         .set_len = udma_set_metadata_len,
3357 };
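
/*
 * Client-side sketch (illustrative, using the generic dmaengine metadata
 * helpers that dispatch through the ops table above): after preparing a
 * descriptor on a packet-mode channel a client can either attach its own
 * metadata buffer or fill the descriptor-owned area in place:
 *
 *	// client buffer mode
 *	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
 *
 *	// engine (pointer) mode
 *	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
 *	// ... fill *ptr for TX ...
 *	ret = dmaengine_desc_set_metadata_len(desc, payload_len);
 */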
3358
3359 static struct dma_async_tx_descriptor *
3360 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3361                    unsigned int sglen, enum dma_transfer_direction dir,
3362                    unsigned long tx_flags, void *context)
3363 {
3364         struct udma_chan *uc = to_udma_chan(chan);
3365         enum dma_slave_buswidth dev_width;
3366         struct udma_desc *d;
3367         u32 burst;
3368
3369         if (dir != uc->config.dir &&
3370             (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3371                 dev_err(chan->device->dev,
3372                         "%s: chan%d is for %s, not supporting %s\n",
3373                         __func__, uc->id,
3374                         dmaengine_get_direction_text(uc->config.dir),
3375                         dmaengine_get_direction_text(dir));
3376                 return NULL;
3377         }
3378
3379         if (dir == DMA_DEV_TO_MEM) {
3380                 dev_width = uc->cfg.src_addr_width;
3381                 burst = uc->cfg.src_maxburst;
3382         } else if (dir == DMA_MEM_TO_DEV) {
3383                 dev_width = uc->cfg.dst_addr_width;
3384                 burst = uc->cfg.dst_maxburst;
3385         } else {
3386                 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3387                 return NULL;
3388         }
3389
3390         if (!burst)
3391                 burst = 1;
3392
3393         if (uc->config.pkt_mode)
3394                 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3395                                            context);
3396         else if (is_slave_direction(uc->config.dir))
3397                 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3398                                           context);
3399         else
3400                 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3401                                                     tx_flags, context);
3402
3403         if (!d)
3404                 return NULL;
3405
3406         d->dir = dir;
3407         d->desc_idx = 0;
3408         d->tr_idx = 0;
3409
3410         /* static TR for remote PDMA */
3411         if (udma_configure_statictr(uc, d, dev_width, burst)) {
3412                 dev_err(uc->ud->dev,
3413                         "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3414                         __func__, d->static_tr.bstcnt);
3415
3416                 udma_free_hwdesc(uc, d);
3417                 kfree(d);
3418                 return NULL;
3419         }
3420
3421         if (uc->config.metadata_size)
3422                 d->vd.tx.metadata_ops = &metadata_ops;
3423
3424         return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3425 }
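
/*
 * Client sketch (illustrative): a typical slave consumer reaches the
 * callback above through the generic dmaengine API:
 *
 *	dmaengine_slave_config(chan, &cfg);	// bus width, maxburst, ...
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		cookie = dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */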
3426
3427 static struct udma_desc *
3428 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3429                         size_t buf_len, size_t period_len,
3430                         enum dma_transfer_direction dir, unsigned long flags)
3431 {
3432         struct udma_desc *d;
3433         size_t tr_size, period_addr;
3434         struct cppi5_tr_type1_t *tr_req;
3435         unsigned int periods = buf_len / period_len;
3436         u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3437         unsigned int i;
3438         int num_tr;
3439
3440         num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3441                                       &tr0_cnt1, &tr1_cnt0);
3442         if (num_tr < 0) {
3443                 dev_err(uc->ud->dev, "size %zu is not supported\n",
3444                         period_len);
3445                 return NULL;
3446         }
3447
3448         /* Now allocate and set up the descriptor. */
3449         tr_size = sizeof(struct cppi5_tr_type1_t);
3450         d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3451         if (!d)
3452                 return NULL;
3453
3454         tr_req = d->hwdesc[0].tr_req_base;
3455         if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3456                 period_addr = buf_addr;
3457         else
3458                 period_addr = buf_addr |
3459                         ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3460
3461         for (i = 0; i < periods; i++) {
3462                 int tr_idx = i * num_tr;
3463
3464                 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3465                               false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3466
3467                 tr_req[tr_idx].addr = period_addr;
3468                 tr_req[tr_idx].icnt0 = tr0_cnt0;
3469                 tr_req[tr_idx].icnt1 = tr0_cnt1;
3470                 tr_req[tr_idx].dim1 = tr0_cnt0;
3471
3472                 if (num_tr == 2) {
3473                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3474                                          CPPI5_TR_CSF_SUPR_EVT);
3475                         tr_idx++;
3476
3477                         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3478                                       false, false,
3479                                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3480
3481                         tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3482                         tr_req[tr_idx].icnt0 = tr1_cnt0;
3483                         tr_req[tr_idx].icnt1 = 1;
3484                         tr_req[tr_idx].dim1 = tr1_cnt0;
3485                 }
3486
3487                 if (!(flags & DMA_PREP_INTERRUPT))
3488                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3489                                          CPPI5_TR_CSF_SUPR_EVT);
3490
3491                 period_addr += period_len;
3492         }
3493
3494         return d;
3495 }
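
/*
 * Event layout (illustrative): with num_tr == 2 and DMA_PREP_INTERRUPT set,
 * each period expands to a TR pair in which only the second may raise an
 * event:
 *
 *	TR[2i]:	  tr0_cnt0 x tr0_cnt1 bytes, CSF_SUPR_EVT set (silent)
 *	TR[2i+1]: tr1_cnt0 tail bytes, completion event -> period callback
 *
 * Without DMA_PREP_INTERRUPT the trailing TR of each period is suppressed
 * as well, so the transfer runs without per-period events.
 */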
3496
3497 static struct udma_desc *
3498 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3499                          size_t buf_len, size_t period_len,
3500                          enum dma_transfer_direction dir, unsigned long flags)
3501 {
3502         struct udma_desc *d;
3503         u32 ring_id;
3504         int i;
3505         int periods = buf_len / period_len;
3506
3507         if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3508                 return NULL;
3509
3510         if (period_len >= SZ_4M)
3511                 return NULL;
3512
3513         d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3514         if (!d)
3515                 return NULL;
3516
3517         d->hwdesc_count = periods;
3518
3519         /* TODO: re-check this... */
3520         if (dir == DMA_DEV_TO_MEM)
3521                 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3522         else
3523                 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3524
3525         if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3526                 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3527
3528         for (i = 0; i < periods; i++) {
3529                 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3530                 dma_addr_t period_addr = buf_addr + (period_len * i);
3531                 struct cppi5_host_desc_t *h_desc;
3532
3533                 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3534                                                 GFP_NOWAIT,
3535                                                 &hwdesc->cppi5_desc_paddr);
3536                 if (!hwdesc->cppi5_desc_vaddr) {
3537                         dev_err(uc->ud->dev,
3538                                 "descriptor%d allocation failed\n", i);
3539
3540                         udma_free_hwdesc(uc, d);
3541                         kfree(d);
3542                         return NULL;
3543                 }
3544
3545                 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3546                 h_desc = hwdesc->cppi5_desc_vaddr;
3547
3548                 cppi5_hdesc_init(h_desc, 0, 0);
3549                 cppi5_hdesc_set_pktlen(h_desc, period_len);
3550
3551                 /* Flow and Packet ID */
3552                 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3553                                       CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3554                 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3555
3556                 /* attach each period to a new descriptor */
3557                 cppi5_hdesc_attach_buf(h_desc,
3558                                        period_addr, period_len,
3559                                        period_addr, period_len);
3560         }
3561
3562         return d;
3563 }
3564
3565 static struct dma_async_tx_descriptor *
3566 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3567                      size_t period_len, enum dma_transfer_direction dir,
3568                      unsigned long flags)
3569 {
3570         struct udma_chan *uc = to_udma_chan(chan);
3571         enum dma_slave_buswidth dev_width;
3572         struct udma_desc *d;
3573         u32 burst;
3574
3575         if (dir != uc->config.dir) {
3576                 dev_err(chan->device->dev,
3577                         "%s: chan%d is for %s, not supporting %s\n",
3578                         __func__, uc->id,
3579                         dmaengine_get_direction_text(uc->config.dir),
3580                         dmaengine_get_direction_text(dir));
3581                 return NULL;
3582         }
3583
3584         uc->cyclic = true;
3585
3586         if (dir == DMA_DEV_TO_MEM) {
3587                 dev_width = uc->cfg.src_addr_width;
3588                 burst = uc->cfg.src_maxburst;
3589         } else if (dir == DMA_MEM_TO_DEV) {
3590                 dev_width = uc->cfg.dst_addr_width;
3591                 burst = uc->cfg.dst_maxburst;
3592         } else {
3593                 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3594                 return NULL;
3595         }
3596
3597         if (!burst)
3598                 burst = 1;
3599
3600         if (uc->config.pkt_mode)
3601                 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3602                                              dir, flags);
3603         else
3604                 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3605                                             dir, flags);
3606
3607         if (!d)
3608                 return NULL;
3609
3610         d->sglen = buf_len / period_len;
3611
3612         d->dir = dir;
3613         d->residue = buf_len;
3614
3615         /* static TR for remote PDMA */
3616         if (udma_configure_statictr(uc, d, dev_width, burst)) {
3617                 dev_err(uc->ud->dev,
3618                         "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3619                         __func__, d->static_tr.bstcnt);
3620
3621                 udma_free_hwdesc(uc, d);
3622                 kfree(d);
3623                 return NULL;
3624         }
3625
3626         if (uc->config.metadata_size)
3627                 d->vd.tx.metadata_ops = &metadata_ops;
3628
3629         return vchan_tx_prep(&uc->vc, &d->vd, flags);
3630 }
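
/*
 * Client sketch (illustrative): cyclic transfers are typically requested by
 * audio-style clients; buf_len must be an integer multiple of period_len:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = period_elapsed;	// hypothetical per-period hook
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */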
3631
3632 static struct dma_async_tx_descriptor *
3633 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3634                      size_t len, unsigned long tx_flags)
3635 {
3636         struct udma_chan *uc = to_udma_chan(chan);
3637         struct udma_desc *d;
3638         struct cppi5_tr_type15_t *tr_req;
3639         int num_tr;
3640         size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3641         u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3642
3643         if (uc->config.dir != DMA_MEM_TO_MEM) {
3644                 dev_err(chan->device->dev,
3645                         "%s: chan%d is for %s, not supporting %s\n",
3646                         __func__, uc->id,
3647                         dmaengine_get_direction_text(uc->config.dir),
3648                         dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3649                 return NULL;
3650         }
3651
3652         num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3653                                       &tr0_cnt1, &tr1_cnt0);
3654         if (num_tr < 0) {
3655                 dev_err(uc->ud->dev, "size %zu is not supported\n",
3656                         len);
3657                 return NULL;
3658         }
3659
3660         d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3661         if (!d)
3662                 return NULL;
3663
3664         d->dir = DMA_MEM_TO_MEM;
3665         d->desc_idx = 0;
3666         d->tr_idx = 0;
3667         d->residue = len;
3668
3669         if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3670                 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3671                 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3672         }
3673
3674         tr_req = d->hwdesc[0].tr_req_base;
3675
3676         cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3677                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3678         cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
3679
3680         tr_req[0].addr = src;
3681         tr_req[0].icnt0 = tr0_cnt0;
3682         tr_req[0].icnt1 = tr0_cnt1;
3683         tr_req[0].icnt2 = 1;
3684         tr_req[0].icnt3 = 1;
3685         tr_req[0].dim1 = tr0_cnt0;
3686
3687         tr_req[0].daddr = dest;
3688         tr_req[0].dicnt0 = tr0_cnt0;
3689         tr_req[0].dicnt1 = tr0_cnt1;
3690         tr_req[0].dicnt2 = 1;
3691         tr_req[0].dicnt3 = 1;
3692         tr_req[0].ddim1 = tr0_cnt0;
3693
3694         if (num_tr == 2) {
3695                 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3696                               CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3697                 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
3698
3699                 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3700                 tr_req[1].icnt0 = tr1_cnt0;
3701                 tr_req[1].icnt1 = 1;
3702                 tr_req[1].icnt2 = 1;
3703                 tr_req[1].icnt3 = 1;
3704
3705                 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3706                 tr_req[1].dicnt0 = tr1_cnt0;
3707                 tr_req[1].dicnt1 = 1;
3708                 tr_req[1].dicnt2 = 1;
3709                 tr_req[1].dicnt3 = 1;
3710         }
3711
3712         cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
3713                          CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3714
3715         if (uc->config.metadata_size)
3716                 d->vd.tx.metadata_ops = &metadata_ops;
3717
3718         return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3719 }
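
/*
 * Worked example (illustrative, assuming the SZ_64K block split performed
 * by udma_get_tr_counters() earlier in this file): a memcpy of
 * len = 200000 bytes between 8-byte aligned buffers yields
 *
 *	tr0_cnt0 = SZ_64K - 8	  = 65528;  // largest aligned block length
 *	tr0_cnt1 = 200000 / 65528 = 3;	    // full blocks handled by TR0
 *	tr1_cnt0 = 200000 % 65528 = 3416;   // tail handled by TR1
 *
 * so num_tr == 2 and the second TR above starts at
 * src/dest + tr0_cnt1 * tr0_cnt0.
 */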
3720
3721 static void udma_issue_pending(struct dma_chan *chan)
3722 {
3723         struct udma_chan *uc = to_udma_chan(chan);
3724         unsigned long flags;
3725
3726         spin_lock_irqsave(&uc->vc.lock, flags);
3727
3728         /* If we have something pending and no active descriptor, then */
3729         if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3730                 /*
3731                  * start a descriptor if the channel is NOT [marked as
3732                  * terminating _and_ it is still running (teardown has not
3733                  * completed yet)].
3734                  */
3735                 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3736                       udma_is_chan_running(uc)))
3737                         udma_start(uc);
3738         }
3739
3740         spin_unlock_irqrestore(&uc->vc.lock, flags);
3741 }
3742
3743 static enum dma_status udma_tx_status(struct dma_chan *chan,
3744                                       dma_cookie_t cookie,
3745                                       struct dma_tx_state *txstate)
3746 {
3747         struct udma_chan *uc = to_udma_chan(chan);
3748         enum dma_status ret;
3749         unsigned long flags;
3750
3751         spin_lock_irqsave(&uc->vc.lock, flags);
3752
3753         ret = dma_cookie_status(chan, cookie, txstate);
3754
3755         if (!udma_is_chan_running(uc))
3756                 ret = DMA_COMPLETE;
3757
3758         if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3759                 ret = DMA_PAUSED;
3760
3761         if (ret == DMA_COMPLETE || !txstate)
3762                 goto out;
3763
3764         if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3765                 u32 peer_bcnt = 0;
3766                 u32 bcnt = 0;
3767                 u32 residue = uc->desc->residue;
3768                 u32 delay = 0;
3769
3770                 if (uc->desc->dir == DMA_MEM_TO_DEV) {
3771                         bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3772
3773                         if (uc->config.ep_type != PSIL_EP_NATIVE) {
3774                                 peer_bcnt = udma_tchanrt_read(uc,
3775                                                 UDMA_CHAN_RT_PEER_BCNT_REG);
3776
3777                                 if (bcnt > peer_bcnt)
3778                                         delay = bcnt - peer_bcnt;
3779                         }
3780                 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3781                         bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3782
3783                         if (uc->config.ep_type != PSIL_EP_NATIVE) {
3784                                 peer_bcnt = udma_rchanrt_read(uc,
3785                                                 UDMA_CHAN_RT_PEER_BCNT_REG);
3786
3787                                 if (peer_bcnt > bcnt)
3788                                         delay = peer_bcnt - bcnt;
3789                         }
3790                 } else {
3791                         bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3792                 }
3793
3794                 bcnt -= uc->bcnt;
3795                 if (bcnt && !(bcnt % uc->desc->residue))
3796                         residue = 0;
3797                 else
3798                         residue -= bcnt % uc->desc->residue;
3799
3800                 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3801                         ret = DMA_COMPLETE;
3802                         delay = 0;
3803                 }
3804
3805                 dma_set_residue(txstate, residue);
3806                 dma_set_in_flight_bytes(txstate, delay);
3807
3808         } else {
3809                 ret = DMA_COMPLETE;
3810         }
3811
3812 out:
3813         spin_unlock_irqrestore(&uc->vc.lock, flags);
3814         return ret;
3815 }
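
/*
 * Worked example (illustrative): for a 4096-byte descriptor whose channel
 * byte counter shows 1024 bytes of progress (after subtracting uc->bcnt),
 *
 *	residue = 4096 - (1024 % 4096) = 3072;
 *
 * The modulo keeps the arithmetic valid for cyclic descriptors, where the
 * hardware counter keeps running across buffer wraps; a non-zero count that
 * is an exact multiple of the descriptor size reads as complete
 * (residue == 0).
 */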
3816
3817 static int udma_pause(struct dma_chan *chan)
3818 {
3819         struct udma_chan *uc = to_udma_chan(chan);
3820
3821         /* pause the channel */
3822         switch (uc->config.dir) {
3823         case DMA_DEV_TO_MEM:
3824                 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3825                                          UDMA_PEER_RT_EN_PAUSE,
3826                                          UDMA_PEER_RT_EN_PAUSE);
3827                 break;
3828         case DMA_MEM_TO_DEV:
3829                 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3830                                          UDMA_PEER_RT_EN_PAUSE,
3831                                          UDMA_PEER_RT_EN_PAUSE);
3832                 break;
3833         case DMA_MEM_TO_MEM:
3834                 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3835                                          UDMA_CHAN_RT_CTL_PAUSE,
3836                                          UDMA_CHAN_RT_CTL_PAUSE);
3837                 break;
3838         default:
3839                 return -EINVAL;
3840         }
3841
3842         return 0;
3843 }
3844
3845 static int udma_resume(struct dma_chan *chan)
3846 {
3847         struct udma_chan *uc = to_udma_chan(chan);
3848
3849         /* resume the channel */
3850         switch (uc->config.dir) {
3851         case DMA_DEV_TO_MEM:
3852                 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3853                                          UDMA_PEER_RT_EN_PAUSE, 0);
3854
3855                 break;
3856         case DMA_MEM_TO_DEV:
3857                 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3858                                          UDMA_PEER_RT_EN_PAUSE, 0);
3859                 break;
3860         case DMA_MEM_TO_MEM:
3861                 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3862                                          UDMA_CHAN_RT_CTL_PAUSE, 0);
3863                 break;
3864         default:
3865                 return -EINVAL;
3866         }
3867
3868         return 0;
3869 }
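
/*
 * Usage sketch (illustrative): clients reach the two callbacks above via
 * the generic wrappers:
 *
 *	dmaengine_pause(chan);
 *	// ... transient stall handled ...
 *	dmaengine_resume(chan);
 *
 * Note the design split: slave directions pause at the PSI-L peer
 * (UDMA_CHAN_RT_PEER_RT_EN_REG) so data is held at the endpoint, while
 * MEM_TO_MEM pauses the channel itself through UDMA_CHAN_RT_CTL_REG.
 */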
3870
3871 static int udma_terminate_all(struct dma_chan *chan)
3872 {
3873         struct udma_chan *uc = to_udma_chan(chan);
3874         unsigned long flags;
3875         LIST_HEAD(head);
3876
3877         spin_lock_irqsave(&uc->vc.lock, flags);
3878
3879         if (udma_is_chan_running(uc))
3880                 udma_stop(uc);
3881
3882         if (uc->desc) {
3883                 uc->terminated_desc = uc->desc;
3884                 uc->desc = NULL;
3885                 uc->terminated_desc->terminated = true;
3886                 cancel_delayed_work(&uc->tx_drain.work);
3887         }
3888
3889         uc->paused = false;
3890
3891         vchan_get_all_descriptors(&uc->vc, &head);
3892         spin_unlock_irqrestore(&uc->vc.lock, flags);
3893         vchan_dma_desc_free_list(&uc->vc, &head);
3894
3895         return 0;
3896 }
3897
3898 static void udma_synchronize(struct dma_chan *chan)
3899 {
3900         struct udma_chan *uc = to_udma_chan(chan);
3901         unsigned long timeout = msecs_to_jiffies(1000);
3902
3903         vchan_synchronize(&uc->vc);
3904
3905         if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3906                 timeout = wait_for_completion_timeout(&uc->teardown_completed,
3907                                                       timeout);
3908                 if (!timeout) {
3909                         dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3910                                  uc->id);
3911                         udma_dump_chan_stdata(uc);
3912                         udma_reset_chan(uc, true);
3913                 }
3914         }
3915
3916         udma_reset_chan(uc, false);
3917         if (udma_is_chan_running(uc))
3918                 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3919
3920         cancel_delayed_work_sync(&uc->tx_drain.work);
3921         udma_reset_rings(uc);
3922 }
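
/*
 * Usage sketch (illustrative): a client tears a channel down safely by
 * combining the two callbacks above through the generic helper, which also
 * rides out the one second teardown timeout handled here:
 *
 *	dmaengine_terminate_sync(chan);	// terminate_all + synchronize
 */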
3923
3924 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3925                                    struct virt_dma_desc *vd,
3926                                    struct dmaengine_result *result)
3927 {
3928         struct udma_chan *uc = to_udma_chan(&vc->chan);
3929         struct udma_desc *d;
3930
3931         if (!vd)
3932                 return;
3933
3934         d = to_udma_desc(&vd->tx);
3935
3936         if (d->metadata_size)
3937                 udma_fetch_epib(uc, d);
3938
3939         /* Provide residue information for the client */
3940         if (result) {
3941                 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3942
3943                 if (cppi5_desc_get_type(desc_vaddr) ==
3944                     CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3945                         result->residue = d->residue -
3946                                           cppi5_hdesc_get_pktlen(desc_vaddr);
3947                         if (result->residue)
3948                                 result->result = DMA_TRANS_ABORTED;
3949                         else
3950                                 result->result = DMA_TRANS_NOERROR;
3951                 } else {
3952                         result->residue = 0;
3953                         result->result = DMA_TRANS_NOERROR;
3954                 }
3955         }
3956 }
3957
3958 /*
3959  * This tasklet handles the completion of a DMA descriptor by
3960  * calling its callback and freeing it.
3961  */
3962 static void udma_vchan_complete(struct tasklet_struct *t)
3963 {
3964         struct virt_dma_chan *vc = from_tasklet(vc, t, task);
3965         struct virt_dma_desc *vd, *_vd;
3966         struct dmaengine_desc_callback cb;
3967         LIST_HEAD(head);
3968
3969         spin_lock_irq(&vc->lock);
3970         list_splice_tail_init(&vc->desc_completed, &head);
3971         vd = vc->cyclic;
3972         if (vd) {
3973                 vc->cyclic = NULL;
3974                 dmaengine_desc_get_callback(&vd->tx, &cb);
3975         } else {
3976                 memset(&cb, 0, sizeof(cb));
3977         }
3978         spin_unlock_irq(&vc->lock);
3979
3980         udma_desc_pre_callback(vc, vd, NULL);
3981         dmaengine_desc_callback_invoke(&cb, NULL);
3982
3983         list_for_each_entry_safe(vd, _vd, &head, node) {
3984                 struct dmaengine_result result;
3985
3986                 dmaengine_desc_get_callback(&vd->tx, &cb);
3987
3988                 list_del(&vd->node);
3989
3990                 udma_desc_pre_callback(vc, vd, &result);
3991                 dmaengine_desc_callback_invoke(&cb, &result);
3992
3993                 vchan_vdesc_fini(vd);
3994         }
3995 }
3996
3997 static void udma_free_chan_resources(struct dma_chan *chan)
3998 {
3999         struct udma_chan *uc = to_udma_chan(chan);
4000         struct udma_dev *ud = to_udma_dev(chan->device);
4001
4002         udma_terminate_all(chan);
4003         if (uc->terminated_desc) {
4004                 udma_reset_chan(uc, false);
4005                 udma_reset_rings(uc);
4006         }
4007
4008         cancel_delayed_work_sync(&uc->tx_drain.work);
4009
4010         if (uc->irq_num_ring > 0) {
4011                 free_irq(uc->irq_num_ring, uc);
4012
4013                 uc->irq_num_ring = 0;
4014         }
4015         if (uc->irq_num_udma > 0) {
4016                 free_irq(uc->irq_num_udma, uc);
4017
4018                 uc->irq_num_udma = 0;
4019         }
4020
4021         /* Release PSI-L pairing */
4022         if (uc->psil_paired) {
4023                 navss_psil_unpair(ud, uc->config.src_thread,
4024                                   uc->config.dst_thread);
4025                 uc->psil_paired = false;
4026         }
4027
4028         vchan_free_chan_resources(&uc->vc);
4029         tasklet_kill(&uc->vc.task);
4030
4031         bcdma_free_bchan_resources(uc);
4032         udma_free_tx_resources(uc);
4033         udma_free_rx_resources(uc);
4034         udma_reset_uchan(uc);
4035
4036         if (uc->use_dma_pool) {
4037                 dma_pool_destroy(uc->hdesc_pool);
4038                 uc->use_dma_pool = false;
4039         }
4040 }
4041
4042 static struct platform_driver udma_driver;
4043 static struct platform_driver bcdma_driver;
4044 static struct platform_driver pktdma_driver;
4045
4046 struct udma_filter_param {
4047         int remote_thread_id;
4048         u32 atype;
4049         u32 asel;
4050         u32 tr_trigger_type;
4051 };
4052
4053 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4054 {
4055         struct udma_chan_config *ucc;
4056         struct psil_endpoint_config *ep_config;
4057         struct udma_filter_param *filter_param;
4058         struct udma_chan *uc;
4059         struct udma_dev *ud;
4060
4061         if (chan->device->dev->driver != &udma_driver.driver &&
4062             chan->device->dev->driver != &bcdma_driver.driver &&
4063             chan->device->dev->driver != &pktdma_driver.driver)
4064                 return false;
4065
4066         uc = to_udma_chan(chan);
4067         ucc = &uc->config;
4068         ud = uc->ud;
4069         filter_param = param;
4070
4071         if (filter_param->atype > 2) {
4072                 dev_err(ud->dev, "Invalid channel atype: %u\n",
4073                         filter_param->atype);
4074                 return false;
4075         }
4076
4077         if (filter_param->asel > 15) {
4078                 dev_err(ud->dev, "Invalid channel asel: %u\n",
4079                         filter_param->asel);
4080                 return false;
4081         }
4082
4083         ucc->remote_thread_id = filter_param->remote_thread_id;
4084         ucc->atype = filter_param->atype;
4085         ucc->asel = filter_param->asel;
4086         ucc->tr_trigger_type = filter_param->tr_trigger_type;
4087
4088         if (ucc->tr_trigger_type) {
4089                 ucc->dir = DMA_MEM_TO_MEM;
4090                 goto triggered_bchan;
4091         } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4092                 ucc->dir = DMA_MEM_TO_DEV;
4093         } else {
4094                 ucc->dir = DMA_DEV_TO_MEM;
4095         }
4096
4097         ep_config = psil_get_ep_config(ucc->remote_thread_id);
4098         if (IS_ERR(ep_config)) {
4099                 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4100                         ucc->remote_thread_id);
4101                 ucc->dir = DMA_MEM_TO_MEM;
4102                 ucc->remote_thread_id = -1;
4103                 ucc->atype = 0;
4104                 ucc->asel = 0;
4105                 return false;
4106         }
4107
4108         if (ud->match_data->type == DMA_TYPE_BCDMA &&
4109             ep_config->pkt_mode) {
4110                 dev_err(ud->dev,
4111                         "Only TR mode is supported (psi-l thread 0x%04x)\n",
4112                         ucc->remote_thread_id);
4113                 ucc->dir = DMA_MEM_TO_MEM;
4114                 ucc->remote_thread_id = -1;
4115                 ucc->atype = 0;
4116                 ucc->asel = 0;
4117                 return false;
4118         }
4119
4120         ucc->pkt_mode = ep_config->pkt_mode;
4121         ucc->channel_tpl = ep_config->channel_tpl;
4122         ucc->notdpkt = ep_config->notdpkt;
4123         ucc->ep_type = ep_config->ep_type;
4124
4125         if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4126             ep_config->mapped_channel_id >= 0) {
4127                 ucc->mapped_channel_id = ep_config->mapped_channel_id;
4128                 ucc->default_flow_id = ep_config->default_flow_id;
4129         } else {
4130                 ucc->mapped_channel_id = -1;
4131                 ucc->default_flow_id = -1;
4132         }
4133
4134         if (ucc->ep_type != PSIL_EP_NATIVE) {
4135                 const struct udma_match_data *match_data = ud->match_data;
4136
4137                 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4138                         ucc->enable_acc32 = ep_config->pdma_acc32;
4139                 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4140                         ucc->enable_burst = ep_config->pdma_burst;
4141         }
4142
4143         ucc->needs_epib = ep_config->needs_epib;
4144         ucc->psd_size = ep_config->psd_size;
4145         ucc->metadata_size =
4146                         (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4147                         ucc->psd_size;
4148
4149         if (ucc->pkt_mode)
4150                 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4151                                  ucc->metadata_size, ud->desc_align);
4152
4153         dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4154                 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4155
4156         return true;
4157
4158 triggered_bchan:
4159         dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4160                 ucc->tr_trigger_type);
4161
4162         return true;
4164 }
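
/*
 * Worked example (illustrative): for an endpoint with needs_epib set and a
 * 16-byte protocol specific data area the filter above derives
 *
 *	metadata_size = CPPI5_INFO0_HDESC_EPIB_SIZE + 16;  // EPIB is 16 bytes
 *	hdesc_size    = ALIGN(sizeof(struct cppi5_host_desc_t) +
 *			      metadata_size, ud->desc_align);
 *
 * hdesc_size later sizes the dma_pool that packet-mode descriptors are
 * allocated from.
 */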
4165
4166 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4167                                       struct of_dma *ofdma)
4168 {
4169         struct udma_dev *ud = ofdma->of_dma_data;
4170         dma_cap_mask_t mask = ud->ddev.cap_mask;
4171         struct udma_filter_param filter_param;
4172         struct dma_chan *chan;
4173
4174         if (ud->match_data->type == DMA_TYPE_BCDMA) {
4175                 if (dma_spec->args_count != 3)
4176                         return NULL;
4177
4178                 filter_param.tr_trigger_type = dma_spec->args[0];
4179                 filter_param.remote_thread_id = dma_spec->args[1];
4180                 filter_param.asel = dma_spec->args[2];
4181                 filter_param.atype = 0;
4182         } else {
4183                 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4184                         return NULL;
4185
4186                 filter_param.remote_thread_id = dma_spec->args[0];
4187                 filter_param.tr_trigger_type = 0;
4188                 if (dma_spec->args_count == 2) {
4189                         if (ud->match_data->type == DMA_TYPE_UDMA) {
4190                                 filter_param.atype = dma_spec->args[1];
4191                                 filter_param.asel = 0;
4192                         } else {
4193                                 filter_param.atype = 0;
4194                                 filter_param.asel = dma_spec->args[1];
4195                         }
4196                 } else {
4197                         filter_param.atype = 0;
4198                         filter_param.asel = 0;
4199                 }
4200         }
4201
4202         chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4203                                      ofdma->of_node);
4204         if (!chan) {
4205                 dev_err(ud->dev, "failed to get channel in %s\n", __func__);
4206                 return ERR_PTR(-EINVAL);
4207         }
4208
4209         return chan;
4210 }
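
/*
 * Binding sketch (illustrative, hypothetical thread IDs): the argument
 * layout parsed above means a UDMA consumer passes <thread_id [atype]>
 * while a BCDMA consumer passes <trigger_type thread_id asel>, e.g.:
 *
 *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *	dma-names = "tx", "rx";
 *
 * Bit 15 of the thread ID (K3_PSIL_DST_THREAD_ID_OFFSET) marks the
 * destination side and selects DMA_MEM_TO_DEV in udma_dma_filter_fn().
 */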
4211
4212 static struct udma_match_data am654_main_data = {
4213         .type = DMA_TYPE_UDMA,
4214         .psil_base = 0x1000,
4215         .enable_memcpy_support = true,
4216         .statictr_z_mask = GENMASK(11, 0),
4217         .burst_size = {
4218                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4219                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4220                 0, /* No UH Channels */
4221         },
4222 };
4223
4224 static struct udma_match_data am654_mcu_data = {
4225         .type = DMA_TYPE_UDMA,
4226         .psil_base = 0x6000,
4227         .enable_memcpy_support = false,
4228         .statictr_z_mask = GENMASK(11, 0),
4229         .burst_size = {
4230                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4231                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4232                 0, /* No UH Channels */
4233         },
4234 };
4235
4236 static struct udma_match_data j721e_main_data = {
4237         .type = DMA_TYPE_UDMA,
4238         .psil_base = 0x1000,
4239         .enable_memcpy_support = true,
4240         .flags = UDMA_FLAGS_J7_CLASS,
4241         .statictr_z_mask = GENMASK(23, 0),
4242         .burst_size = {
4243                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4244                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
4245                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
4246         },
4247 };
4248
4249 static struct udma_match_data j721e_mcu_data = {
4250         .type = DMA_TYPE_UDMA,
4251         .psil_base = 0x6000,
4252         .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4253         .flags = UDMA_FLAGS_J7_CLASS,
4254         .statictr_z_mask = GENMASK(23, 0),
4255         .burst_size = {
4256                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4257                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
4258                 0, /* No UH Channels */
4259         },
4260 };
4261
4262 static struct udma_match_data am64_bcdma_data = {
4263         .type = DMA_TYPE_BCDMA,
4264         .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4265         .enable_memcpy_support = true, /* Supported via bchan */
4266         .flags = UDMA_FLAGS_J7_CLASS,
4267         .statictr_z_mask = GENMASK(23, 0),
4268         .burst_size = {
4269                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4270                 0, /* No H Channels */
4271                 0, /* No UH Channels */
4272         },
4273 };
4274
4275 static struct udma_match_data am64_pktdma_data = {
4276         .type = DMA_TYPE_PKTDMA,
4277         .psil_base = 0x1000,
4278         .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4279         .flags = UDMA_FLAGS_J7_CLASS,
4280         .statictr_z_mask = GENMASK(23, 0),
4281         .burst_size = {
4282                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4283                 0, /* No H Channels */
4284                 0, /* No UH Channels */
4285         },
4286 };
4287
4288 static const struct of_device_id udma_of_match[] = {
4289         {
4290                 .compatible = "ti,am654-navss-main-udmap",
4291                 .data = &am654_main_data,
4292         },
4293         {
4294                 .compatible = "ti,am654-navss-mcu-udmap",
4295                 .data = &am654_mcu_data,
4296         }, {
4297                 .compatible = "ti,j721e-navss-main-udmap",
4298                 .data = &j721e_main_data,
4299         }, {
4300                 .compatible = "ti,j721e-navss-mcu-udmap",
4301                 .data = &j721e_mcu_data,
4302         },
4303         { /* Sentinel */ },
4304 };
4305
4306 static const struct of_device_id bcdma_of_match[] = {
4307         {
4308                 .compatible = "ti,am64-dmss-bcdma",
4309                 .data = &am64_bcdma_data,
4310         },
4311         { /* Sentinel */ },
4312 };
4313
4314 static const struct of_device_id pktdma_of_match[] = {
4315         {
4316                 .compatible = "ti,am64-dmss-pktdma",
4317                 .data = &am64_pktdma_data,
4318         },
4319         { /* Sentinel */ },
4320 };
4321
4322 static struct udma_soc_data am654_soc_data = {
4323         .oes = {
4324                 .udma_rchan = 0x200,
4325         },
4326 };
4327
4328 static struct udma_soc_data j721e_soc_data = {
4329         .oes = {
4330                 .udma_rchan = 0x400,
4331         },
4332 };
4333
4334 static struct udma_soc_data j7200_soc_data = {
4335         .oes = {
4336                 .udma_rchan = 0x80,
4337         },
4338 };
4339
4340 static struct udma_soc_data am64_soc_data = {
4341         .oes = {
4342                 .bcdma_bchan_data = 0x2200,
4343                 .bcdma_bchan_ring = 0x2400,
4344                 .bcdma_tchan_data = 0x2800,
4345                 .bcdma_tchan_ring = 0x2a00,
4346                 .bcdma_rchan_data = 0x2e00,
4347                 .bcdma_rchan_ring = 0x3000,
4348                 .pktdma_tchan_flow = 0x1200,
4349                 .pktdma_rchan_flow = 0x1600,
4350         },
4351         .bcdma_trigger_event_offset = 0xc400,
4352 };
4353
4354 static const struct soc_device_attribute k3_soc_devices[] = {
4355         { .family = "AM65X", .data = &am654_soc_data },
4356         { .family = "J721E", .data = &j721e_soc_data },
4357         { .family = "J7200", .data = &j7200_soc_data },
4358         { .family = "AM64X", .data = &am64_soc_data },
4359         { /* sentinel */ }
4360 };
4361
4362 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4363 {
4364         u32 cap2, cap3, cap4;
4365         int i;
4366
4367         ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4368         if (IS_ERR(ud->mmrs[MMR_GCFG]))
4369                 return PTR_ERR(ud->mmrs[MMR_GCFG]);
4370
4371         cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4372         cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4373
4374         switch (ud->match_data->type) {
4375         case DMA_TYPE_UDMA:
4376                 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4377                 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4378                 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4379                 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4380                 break;
4381         case DMA_TYPE_BCDMA:
4382                 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4383                 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4384                 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4385                 ud->rflow_cnt = ud->rchan_cnt;
4386                 break;
4387         case DMA_TYPE_PKTDMA:
4388                 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4389                 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4390                 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4391                 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4392                 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4393                 break;
4394         default:
4395                 return -EINVAL;
4396         }
4397
4398         for (i = 1; i < MMR_LAST; i++) {
4399                 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4400                         continue;
4401                 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4402                         continue;
4403                 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4404                         continue;
4405
4406                 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4407                 if (IS_ERR(ud->mmrs[i]))
4408                         return PTR_ERR(ud->mmrs[i]);
4409         }
4410
4411         return 0;
4412 }
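/*
 * Illustration of the MMR loop above: a BCDMA instance reporting
 * bchan_cnt == 0 never maps the "bchanrt" region, so its DT node can
 * omit that reg entry without failing probe; the same holds for
 * "tchanrt"/"rchanrt" when the corresponding channel count is zero.
 * Only "gcfg" is unconditionally required, since the capability
 * registers live there.
 */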
4413
4414 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4415                                       struct ti_sci_resource_desc *rm_desc,
4416                                       char *name)
4417 {
4418         bitmap_clear(map, rm_desc->start, rm_desc->num);
4419         bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4420         dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4421                 rm_desc->start, rm_desc->num, rm_desc->start_sec,
4422                 rm_desc->num_sec);
4423 }
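/*
 * The resource bitmaps managed by this helper use inverted polarity: a
 * set bit means "reserved, not for Linux", a cleared bit means "usable".
 * Worked example with made-up numbers: for tchan_cnt = 8 the map is
 * bitmap_fill()ed to 0xff, and a TISCI range of {start = 2, num = 3}
 * clears bits 2-4, leaving 0xe3 - i.e. Linux may allocate tchans 2, 3
 * and 4.
 */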
4424
4425 static const char * const range_names[] = {
4426         [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4427         [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4428         [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4429         [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4430         [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4431 };
4432
4433 static int udma_setup_resources(struct udma_dev *ud)
4434 {
4435         int ret, i, j;
4436         struct device *dev = ud->dev;
4437         struct ti_sci_resource *rm_res, irq_res;
4438         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4439         u32 cap3;
4440
4441         /* Set up the throughput level start indexes */
4442         cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4443         if (of_device_is_compatible(dev->of_node,
4444                                     "ti,am654-navss-main-udmap")) {
4445                 ud->tchan_tpl.levels = 2;
4446                 ud->tchan_tpl.start_idx[0] = 8;
4447         } else if (of_device_is_compatible(dev->of_node,
4448                                            "ti,am654-navss-mcu-udmap")) {
4449                 ud->tchan_tpl.levels = 2;
4450                 ud->tchan_tpl.start_idx[0] = 2;
4451         } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4452                 ud->tchan_tpl.levels = 3;
4453                 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4454                 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4455         } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4456                 ud->tchan_tpl.levels = 2;
4457                 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4458         } else {
4459                 ud->tchan_tpl.levels = 1;
4460         }
4461
4462         ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4463         ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4464         ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4465
4466         ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4467                                            sizeof(unsigned long), GFP_KERNEL);
4468         ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4469                                   GFP_KERNEL);
4470         ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4471                                            sizeof(unsigned long), GFP_KERNEL);
4472         ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4473                                   GFP_KERNEL);
4474         ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4475                                               sizeof(unsigned long),
4476                                               GFP_KERNEL);
4477         ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4478                                                   BITS_TO_LONGS(ud->rflow_cnt),
4479                                                   sizeof(unsigned long),
4480                                                   GFP_KERNEL);
4481         ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4482                                         sizeof(unsigned long),
4483                                         GFP_KERNEL);
4484         ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4485                                   GFP_KERNEL);
4486
4487         if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4488             !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4489             !ud->rflows || !ud->rflow_in_use)
4490                 return -ENOMEM;
4491
	/*
	 * RX flows with the same IDs as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by ID.
	 */
4497         bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4498
4499         /* by default no GP rflows are assigned to Linux */
4500         bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4501
4502         /* Get resource ranges from tisci */
4503         for (i = 0; i < RM_RANGE_LAST; i++) {
4504                 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4505                         continue;
4506
4507                 tisci_rm->rm_ranges[i] =
4508                         devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4509                                                     tisci_rm->tisci_dev_id,
4510                                                     (char *)range_names[i]);
4511         }
4512
	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
		irq_res.sets = rm_res->sets;
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
		irq_res.sets += rm_res->sets;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = 0;
		irq_res.desc[0].num = ud->tchan_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start;
			irq_res.desc[i].num = rm_res->desc[i].num;
			irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
			irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = ud->soc_data->oes.udma_rchan;
		irq_res.desc[i].num = ud->rchan_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			if (rm_res->desc[j].num) {
				irq_res.desc[i].start = rm_res->desc[j].start +
						ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num = rm_res->desc[j].num;
			}
			if (rm_res->desc[j].num_sec) {
				irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
						ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
			}
		}
	}
4558         ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4559         kfree(irq_res.desc);
4560         if (ret) {
4561                 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4562                 return ret;
4563         }
4564
4565         /* GP rflow ranges */
4566         rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4567         if (IS_ERR(rm_res)) {
4568                 /* all gp flows are assigned exclusively to Linux */
4569                 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4570                              ud->rflow_cnt - ud->rchan_cnt);
4571         } else {
4572                 for (i = 0; i < rm_res->sets; i++)
4573                         udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4574                                                   &rm_res->desc[i], "gp-rflow");
4575         }
4576
4577         return 0;
4578 }
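/*
 * Summary of the GP rflow policy implemented above, with made-up counts:
 * given rchan_cnt = 4 and rflow_cnt = 16, flows 0-3 stay reserved as the
 * per-rchan default flows and can only be requested explicitly by ID,
 * while GP flows 4-15 become allocatable only if TISCI grants them (or
 * returns no range at all, in which case all GP flows are assumed to
 * belong to Linux).
 */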
4579
4580 static int bcdma_setup_resources(struct udma_dev *ud)
4581 {
4582         int ret, i, j;
4583         struct device *dev = ud->dev;
4584         struct ti_sci_resource *rm_res, irq_res;
4585         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4586         const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4587         u32 cap;
4588
4589         /* Set up the throughput level start indexes */
4590         cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4591         if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4592                 ud->bchan_tpl.levels = 3;
4593                 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4594                 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4595         } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4596                 ud->bchan_tpl.levels = 2;
4597                 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4598         } else {
4599                 ud->bchan_tpl.levels = 1;
4600         }
4601
4602         cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4603         if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4604                 ud->rchan_tpl.levels = 3;
4605                 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4606                 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4607         } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4608                 ud->rchan_tpl.levels = 2;
4609                 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4610         } else {
4611                 ud->rchan_tpl.levels = 1;
4612         }
4613
4614         if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4615                 ud->tchan_tpl.levels = 3;
4616                 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4617                 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4618         } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4619                 ud->tchan_tpl.levels = 2;
4620                 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4621         } else {
4622                 ud->tchan_tpl.levels = 1;
4623         }
4624
4625         ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4626                                            sizeof(unsigned long), GFP_KERNEL);
4627         ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4628                                   GFP_KERNEL);
4629         ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4630                                            sizeof(unsigned long), GFP_KERNEL);
4631         ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4632                                   GFP_KERNEL);
4633         ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4634                                            sizeof(unsigned long), GFP_KERNEL);
4635         ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4636                                   GFP_KERNEL);
	/* BCDMA does not really have flows, but the driver expects them */
4638         ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4639                                         sizeof(unsigned long),
4640                                         GFP_KERNEL);
4641         ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4642                                   GFP_KERNEL);
4643
4644         if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4645             !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4646             !ud->rflows)
4647                 return -ENOMEM;
4648
4649         /* Get resource ranges from tisci */
4650         for (i = 0; i < RM_RANGE_LAST; i++) {
4651                 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4652                         continue;
4653                 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4654                         continue;
4655                 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4656                         continue;
4657                 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4658                         continue;
4659
4660                 tisci_rm->rm_ranges[i] =
4661                         devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4662                                                     tisci_rm->tisci_dev_id,
4663                                                     (char *)range_names[i]);
4664         }
4665
4666         irq_res.sets = 0;
4667
	/* bchan ranges */
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
			irq_res.sets++;
		} else {
			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->bchan_map,
							  &rm_res->desc[i],
							  "bchan");
			irq_res.sets += rm_res->sets;
		}
	}

	/* tchan ranges */
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->tchan_map,
							  &rm_res->desc[i],
							  "tchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	/* rchan ranges */
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->rchan_map,
							  &rm_res->desc[i],
							  "rchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	i = 0;
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[0].start = oes->bcdma_bchan_ring;
			irq_res.desc[0].num = ud->bchan_cnt;
			i = 1;
		} else {
			for (i = 0; i < rm_res->sets; i++) {
				irq_res.desc[i].start = rm_res->desc[i].start +
							oes->bcdma_bchan_ring;
				irq_res.desc[i].num = rm_res->desc[i].num;
			}
		}
	}
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_tchan_data;
			irq_res.desc[i].num = ud->tchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
			irq_res.desc[i + 1].num = ud->tchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_tchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_tchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_rchan_data;
			irq_res.desc[i].num = ud->rchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
			irq_res.desc[i + 1].num = ud->rchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_rchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_rchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}
4746
4747         ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4748         kfree(irq_res.desc);
4749         if (ret) {
4750                 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4751                 return ret;
4752         }
4753
4754         return 0;
4755 }
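/*
 * Why the "* 2" and "i += 2" stepping above: on BCDMA every tchan and
 * rchan produces two output events - one on the data/trigger side
 * (oes->bcdma_*_data) and one for the completion ring
 * (oes->bcdma_*_ring) - so each TISCI range set expands into a pair of
 * MSI event ranges. bchans only raise the ring event, hence a single
 * range per set.
 */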
4756
4757 static int pktdma_setup_resources(struct udma_dev *ud)
4758 {
4759         int ret, i, j;
4760         struct device *dev = ud->dev;
4761         struct ti_sci_resource *rm_res, irq_res;
4762         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4763         const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4764         u32 cap3;
4765
4766         /* Set up the throughput level start indexes */
4767         cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4768         if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4769                 ud->tchan_tpl.levels = 3;
4770                 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4771                 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4772         } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4773                 ud->tchan_tpl.levels = 2;
4774                 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4775         } else {
4776                 ud->tchan_tpl.levels = 1;
4777         }
4778
4779         ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4780         ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4781         ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4782
4783         ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4784                                            sizeof(unsigned long), GFP_KERNEL);
4785         ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4786                                   GFP_KERNEL);
4787         ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4788                                            sizeof(unsigned long), GFP_KERNEL);
4789         ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4790                                   GFP_KERNEL);
4791         ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4792                                         sizeof(unsigned long),
4793                                         GFP_KERNEL);
4794         ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4795                                   GFP_KERNEL);
4796         ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
4797                                            sizeof(unsigned long), GFP_KERNEL);
4798
4799         if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
4800             !ud->rchans || !ud->rflows || !ud->rflow_in_use)
4801                 return -ENOMEM;
4802
4803         /* Get resource ranges from tisci */
4804         for (i = 0; i < RM_RANGE_LAST; i++) {
4805                 if (i == RM_RANGE_BCHAN)
4806                         continue;
4807
4808                 tisci_rm->rm_ranges[i] =
4809                         devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4810                                                     tisci_rm->tisci_dev_id,
4811                                                     (char *)range_names[i]);
4812         }
4813
4814         /* tchan ranges */
4815         rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4816         if (IS_ERR(rm_res)) {
4817                 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4818         } else {
4819                 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4820                 for (i = 0; i < rm_res->sets; i++)
4821                         udma_mark_resource_ranges(ud, ud->tchan_map,
4822                                                   &rm_res->desc[i], "tchan");
4823         }
4824
4825         /* rchan ranges */
4826         rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4827         if (IS_ERR(rm_res)) {
4828                 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4829         } else {
4830                 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4831                 for (i = 0; i < rm_res->sets; i++)
4832                         udma_mark_resource_ranges(ud, ud->rchan_map,
4833                                                   &rm_res->desc[i], "rchan");
4834         }
4835
	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
		irq_res.sets = rm_res->sets;
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
		irq_res.sets += rm_res->sets;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = oes->pktdma_tchan_flow;
		irq_res.desc[0].num = ud->tflow_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->pktdma_tchan_flow;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = oes->pktdma_rchan_flow;
		irq_res.desc[i].num = ud->rflow_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->pktdma_rchan_flow;
			irq_res.desc[i].num = rm_res->desc[j].num;
		}
	}
4875         ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4876         kfree(irq_res.desc);
4877         if (ret) {
4878                 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4879                 return ret;
4880         }
4881
4882         return 0;
4883 }
4884
4885 static int setup_resources(struct udma_dev *ud)
4886 {
4887         struct device *dev = ud->dev;
4888         int ch_count, ret;
4889
4890         switch (ud->match_data->type) {
4891         case DMA_TYPE_UDMA:
4892                 ret = udma_setup_resources(ud);
4893                 break;
4894         case DMA_TYPE_BCDMA:
4895                 ret = bcdma_setup_resources(ud);
4896                 break;
4897         case DMA_TYPE_PKTDMA:
4898                 ret = pktdma_setup_resources(ud);
4899                 break;
4900         default:
4901                 return -EINVAL;
4902         }
4903
4904         if (ret)
4905                 return ret;
4906
	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
4908         if (ud->bchan_cnt)
4909                 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
4910         ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
4911         ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
4912         if (!ch_count)
4913                 return -ENODEV;
4914
4915         ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
4916                                     GFP_KERNEL);
4917         if (!ud->channels)
4918                 return -ENOMEM;
4919
4920         switch (ud->match_data->type) {
4921         case DMA_TYPE_UDMA:
4922                 dev_info(dev,
4923                          "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
4924                          ch_count,
4925                          ud->tchan_cnt - bitmap_weight(ud->tchan_map,
4926                                                        ud->tchan_cnt),
4927                          ud->rchan_cnt - bitmap_weight(ud->rchan_map,
4928                                                        ud->rchan_cnt),
4929                          ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
4930                                                        ud->rflow_cnt));
4931                 break;
4932         case DMA_TYPE_BCDMA:
4933                 dev_info(dev,
4934                          "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
4935                          ch_count,
4936                          ud->bchan_cnt - bitmap_weight(ud->bchan_map,
4937                                                        ud->bchan_cnt),
4938                          ud->tchan_cnt - bitmap_weight(ud->tchan_map,
4939                                                        ud->tchan_cnt),
4940                          ud->rchan_cnt - bitmap_weight(ud->rchan_map,
4941                                                        ud->rchan_cnt));
4942                 break;
4943         case DMA_TYPE_PKTDMA:
4944                 dev_info(dev,
4945                          "Channels: %d (tchan: %u, rchan: %u)\n",
4946                          ch_count,
4947                          ud->tchan_cnt - bitmap_weight(ud->tchan_map,
4948                                                        ud->tchan_cnt),
4949                          ud->rchan_cnt - bitmap_weight(ud->rchan_map,
4950                                                        ud->rchan_cnt));
4951                 break;
4952         default:
4953                 break;
4954         }
4955
4956         return ch_count;
4957 }
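/*
 * Example of the channel accounting above, with made-up numbers: with
 * tchan_cnt = rchan_cnt = 32 and 20 bits left set (reserved) in each
 * map by the TISCI ranges, Linux ends up with (32 - 20) + (32 - 20) = 24
 * usable channel slots and ud->channels is sized accordingly; a result
 * of 0 means nothing was granted to Linux and probing fails with
 * -ENODEV.
 */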
4958
4959 static int udma_setup_rx_flush(struct udma_dev *ud)
4960 {
4961         struct udma_rx_flush *rx_flush = &ud->rx_flush;
4962         struct cppi5_desc_hdr_t *tr_desc;
4963         struct cppi5_tr_type1_t *tr_req;
4964         struct cppi5_host_desc_t *desc;
4965         struct device *dev = ud->dev;
4966         struct udma_hwdesc *hwdesc;
4967         size_t tr_size;
4968
4969         /* Allocate 1K buffer for discarded data on RX channel teardown */
4970         rx_flush->buffer_size = SZ_1K;
4971         rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
4972                                               GFP_KERNEL);
4973         if (!rx_flush->buffer_vaddr)
4974                 return -ENOMEM;
4975
4976         rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
4977                                                 rx_flush->buffer_size,
4978                                                 DMA_TO_DEVICE);
4979         if (dma_mapping_error(dev, rx_flush->buffer_paddr))
4980                 return -ENOMEM;
4981
4982         /* Set up descriptor to be used for TR mode */
4983         hwdesc = &rx_flush->hwdescs[0];
4984         tr_size = sizeof(struct cppi5_tr_type1_t);
4985         hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
4986         hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
4987                                         ud->desc_align);
4988
4989         hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
4990                                                 GFP_KERNEL);
4991         if (!hwdesc->cppi5_desc_vaddr)
4992                 return -ENOMEM;
4993
4994         hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
4995                                                   hwdesc->cppi5_desc_size,
4996                                                   DMA_TO_DEVICE);
4997         if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
4998                 return -ENOMEM;
4999
5000         /* Start of the TR req records */
5001         hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
5002         /* Start address of the TR response array */
5003         hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
5004
5005         tr_desc = hwdesc->cppi5_desc_vaddr;
5006         cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
5007         cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5008         cppi5_desc_set_retpolicy(tr_desc, 0, 0);
5009
5010         tr_req = hwdesc->tr_req_base;
5011         cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
5012                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
5013         cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
5014
5015         tr_req->addr = rx_flush->buffer_paddr;
5016         tr_req->icnt0 = rx_flush->buffer_size;
5017         tr_req->icnt1 = 1;
5018
5019         dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5020                                    hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5021
5022         /* Set up descriptor to be used for packet mode */
5023         hwdesc = &rx_flush->hwdescs[1];
5024         hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
5025                                         CPPI5_INFO0_HDESC_EPIB_SIZE +
5026                                         CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
5027                                         ud->desc_align);
5028
5029         hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5030                                                 GFP_KERNEL);
5031         if (!hwdesc->cppi5_desc_vaddr)
5032                 return -ENOMEM;
5033
5034         hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5035                                                   hwdesc->cppi5_desc_size,
5036                                                   DMA_TO_DEVICE);
5037         if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5038                 return -ENOMEM;
5039
5040         desc = hwdesc->cppi5_desc_vaddr;
5041         cppi5_hdesc_init(desc, 0, 0);
5042         cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5043         cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
5044
5045         cppi5_hdesc_attach_buf(desc,
5046                                rx_flush->buffer_paddr, rx_flush->buffer_size,
5047                                rx_flush->buffer_paddr, rx_flush->buffer_size);
5048
5049         dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5050                                    hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5051         return 0;
5052 }
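/*
 * The two descriptors built above stay allocated for the lifetime of the
 * device: hwdescs[0] is the TR-mode variant, hwdescs[1] the packet-mode
 * one. At RX channel teardown the driver queues whichever variant matches
 * the channel's pkt_mode so in-flight data drains into the 1K scratch
 * buffer and is thrown away. Since the flushed data is never read back by
 * the CPU, the buffer mapping can stay DMA_TO_DEVICE and is only ever
 * synced towards the device.
 */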
5053
5054 #ifdef CONFIG_DEBUG_FS
5055 static void udma_dbg_summary_show_chan(struct seq_file *s,
5056                                        struct dma_chan *chan)
5057 {
5058         struct udma_chan *uc = to_udma_chan(chan);
5059         struct udma_chan_config *ucc = &uc->config;
5060
5061         seq_printf(s, " %-13s| %s", dma_chan_name(chan),
5062                    chan->dbg_client_name ?: "in-use");
5063         if (ucc->tr_trigger_type)
5064                 seq_puts(s, " (triggered, ");
5065         else
5066                 seq_printf(s, " (%s, ",
5067                            dmaengine_get_direction_text(uc->config.dir));
5068
5069         switch (uc->config.dir) {
5070         case DMA_MEM_TO_MEM:
5071                 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
5072                         seq_printf(s, "bchan%d)\n", uc->bchan->id);
5073                         return;
5074                 }
5075
5076                 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
5077                            ucc->src_thread, ucc->dst_thread);
5078                 break;
5079         case DMA_DEV_TO_MEM:
5080                 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
5081                            ucc->src_thread, ucc->dst_thread);
5082                 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5083                         seq_printf(s, "rflow%d, ", uc->rflow->id);
5084                 break;
5085         case DMA_MEM_TO_DEV:
5086                 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
5087                            ucc->src_thread, ucc->dst_thread);
5088                 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5089                         seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
5090                 break;
5091         default:
5092                 seq_printf(s, ")\n");
5093                 return;
5094         }
5095
	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_puts(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_puts(s, " ]");
		}
	} else {
		seq_puts(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}
5111
5112         seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
5113 }
5114
5115 static void udma_dbg_summary_show(struct seq_file *s,
5116                                   struct dma_device *dma_dev)
5117 {
5118         struct dma_chan *chan;
5119
5120         list_for_each_entry(chan, &dma_dev->channels, device_node) {
5121                 if (chan->client_count)
5122                         udma_dbg_summary_show_chan(s, chan);
5123         }
5124 }
5125 #endif /* CONFIG_DEBUG_FS */
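/*
 * Sample line of the debugfs summary produced above (illustrative values
 * only, composed from the format strings in udma_dbg_summary_show_chan()):
 *
 *  dma0chan4    | in-use (DEV_TO_MEM, rchan4 [0x4503 -> 0x9800], PSI-L Native, Packet mode)
 *
 * The bracketed pair is the PSI-L source -> destination thread ID of the
 * channel, and the last field tells whether it runs in packet or TR mode.
 */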
5126
5127 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
5128 {
5129         const struct udma_match_data *match_data = ud->match_data;
5130         u8 tpl;
5131
5132         if (!match_data->enable_memcpy_support)
5133                 return DMAENGINE_ALIGN_8_BYTES;
5134
5135         /* Get the highest TPL level the device supports for memcpy */
5136         if (ud->bchan_cnt)
5137                 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
5138         else if (ud->tchan_cnt)
5139                 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
5140         else
5141                 return DMAENGINE_ALIGN_8_BYTES;
5142
5143         switch (match_data->burst_size[tpl]) {
5144         case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
5145                 return DMAENGINE_ALIGN_256_BYTES;
5146         case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
5147                 return DMAENGINE_ALIGN_128_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
	default:
5151                 return DMAENGINE_ALIGN_64_BYTES;
5152         }
5153 }
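/*
 * Illustration of the mapping above: a device whose highest-TPL memcpy
 * channels are configured for 256-byte bursts advertises
 * DMAENGINE_ALIGN_256_BYTES in copy_align, so dmaengine clients align
 * memcpy buffers to 256 bytes; burst sizes of 64 bytes or below fall back
 * to 64-byte alignment, and devices without memcpy support to 8-byte
 * alignment.
 */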
5154
5155 #define TI_UDMAC_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
5156                                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
5157                                  BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
5158                                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
5159                                  BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
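/*
 * Client-side sketch (illustrative only: the "rx" channel name and
 * fifo_dma_addr are hypothetical) of how a peripheral driver selects one
 * of the bus widths advertised in TI_UDMAC_BUSWIDTHS through the generic
 * dmaengine API:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_dma_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 1,
 *	};
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *
 *	if (!IS_ERR(chan))
 *		dmaengine_slave_config(chan, &cfg);
 *
 * The config lands in udma_slave_config() (ud->ddev.device_config, set in
 * udma_probe() below) and is applied when a transfer is prepared.
 */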
5160
5161 static int udma_probe(struct platform_device *pdev)
5162 {
5163         struct device_node *navss_node = pdev->dev.parent->of_node;
5164         const struct soc_device_attribute *soc;
5165         struct device *dev = &pdev->dev;
5166         struct udma_dev *ud;
5167         const struct of_device_id *match;
5168         int i, ret;
5169         int ch_count;
5170
5171         ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
5172         if (ret)
5173                 dev_err(dev, "failed to set dma mask stuff\n");
5174
5175         ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
5176         if (!ud)
5177                 return -ENOMEM;
5178
5179         match = of_match_node(udma_of_match, dev->of_node);
5180         if (!match)
5181                 match = of_match_node(bcdma_of_match, dev->of_node);
5182         if (!match) {
5183                 match = of_match_node(pktdma_of_match, dev->of_node);
5184                 if (!match) {
5185                         dev_err(dev, "No compatible match found\n");
5186                         return -ENODEV;
5187                 }
5188         }
5189         ud->match_data = match->data;
5190
5191         soc = soc_device_match(k3_soc_devices);
5192         if (!soc) {
5193                 dev_err(dev, "No compatible SoC found\n");
5194                 return -ENODEV;
5195         }
5196         ud->soc_data = soc->data;
5197
5198         ret = udma_get_mmrs(pdev, ud);
5199         if (ret)
5200                 return ret;
5201
5202         ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
5203         if (IS_ERR(ud->tisci_rm.tisci))
5204                 return PTR_ERR(ud->tisci_rm.tisci);
5205
5206         ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
5207                                    &ud->tisci_rm.tisci_dev_id);
5208         if (ret) {
5209                 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
5210                 return ret;
5211         }
5212         pdev->id = ud->tisci_rm.tisci_dev_id;
5213
5214         ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
5215                                    &ud->tisci_rm.tisci_navss_dev_id);
5216         if (ret) {
5217                 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
5218                 return ret;
5219         }
5220
5221         if (ud->match_data->type == DMA_TYPE_UDMA) {
5222                 ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
5223                                            &ud->atype);
5224                 if (!ret && ud->atype > 2) {
5225                         dev_err(dev, "Invalid atype: %u\n", ud->atype);
5226                         return -EINVAL;
5227                 }
5228         } else {
5229                 ret = of_property_read_u32(dev->of_node, "ti,asel",
5230                                            &ud->asel);
5231                 if (!ret && ud->asel > 15) {
5232                         dev_err(dev, "Invalid asel: %u\n", ud->asel);
5233                         return -EINVAL;
5234                 }
5235         }
5236
5237         ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
5238         ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
5239
5240         if (ud->match_data->type == DMA_TYPE_UDMA) {
5241                 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
5242         } else {
5243                 struct k3_ringacc_init_data ring_init_data;
5244
5245                 ring_init_data.tisci = ud->tisci_rm.tisci;
5246                 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
5247                 if (ud->match_data->type == DMA_TYPE_BCDMA) {
5248                         ring_init_data.num_rings = ud->bchan_cnt +
5249                                                    ud->tchan_cnt +
5250                                                    ud->rchan_cnt;
5251                 } else {
5252                         ring_init_data.num_rings = ud->rflow_cnt +
5253                                                    ud->tflow_cnt;
5254                 }
5255
5256                 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
5257         }
5258
5259         if (IS_ERR(ud->ringacc))
5260                 return PTR_ERR(ud->ringacc);
5261
5262         dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
5263                                             DOMAIN_BUS_TI_SCI_INTA_MSI);
5264         if (!dev->msi_domain) {
5265                 dev_err(dev, "Failed to get MSI domain\n");
5266                 return -EPROBE_DEFER;
5267         }
5268
5269         dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
5270         /* cyclic operation is not supported via PKTDMA */
5271         if (ud->match_data->type != DMA_TYPE_PKTDMA) {
5272                 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
5273                 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
5274         }
5275
5276         ud->ddev.device_config = udma_slave_config;
5277         ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
5278         ud->ddev.device_issue_pending = udma_issue_pending;
5279         ud->ddev.device_tx_status = udma_tx_status;
5280         ud->ddev.device_pause = udma_pause;
5281         ud->ddev.device_resume = udma_resume;
5282         ud->ddev.device_terminate_all = udma_terminate_all;
5283         ud->ddev.device_synchronize = udma_synchronize;
5284 #ifdef CONFIG_DEBUG_FS
5285         ud->ddev.dbg_summary_show = udma_dbg_summary_show;
5286 #endif
5287
5288         switch (ud->match_data->type) {
5289         case DMA_TYPE_UDMA:
5290                 ud->ddev.device_alloc_chan_resources =
5291                                         udma_alloc_chan_resources;
5292                 break;
5293         case DMA_TYPE_BCDMA:
5294                 ud->ddev.device_alloc_chan_resources =
5295                                         bcdma_alloc_chan_resources;
5296                 ud->ddev.device_router_config = bcdma_router_config;
5297                 break;
5298         case DMA_TYPE_PKTDMA:
5299                 ud->ddev.device_alloc_chan_resources =
5300                                         pktdma_alloc_chan_resources;
5301                 break;
5302         default:
5303                 return -EINVAL;
5304         }
5305         ud->ddev.device_free_chan_resources = udma_free_chan_resources;
5306
5307         ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
5308         ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
5309         ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
5310         ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
5311         ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
5312                                        DESC_METADATA_ENGINE;
5313         if (ud->match_data->enable_memcpy_support &&
5314             !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
5315                 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
5316                 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
5317                 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
5318         }
5319
5320         ud->ddev.dev = dev;
5321         ud->dev = dev;
5322         ud->psil_base = ud->match_data->psil_base;
5323
5324         INIT_LIST_HEAD(&ud->ddev.channels);
5325         INIT_LIST_HEAD(&ud->desc_to_purge);
5326
5327         ch_count = setup_resources(ud);
5328         if (ch_count <= 0)
5329                 return ch_count;
5330
5331         spin_lock_init(&ud->lock);
5332         INIT_WORK(&ud->purge_work, udma_purge_desc_work);
5333
5334         ud->desc_align = 64;
5335         if (ud->desc_align < dma_get_cache_alignment())
5336                 ud->desc_align = dma_get_cache_alignment();
5337
5338         ret = udma_setup_rx_flush(ud);
5339         if (ret)
5340                 return ret;
5341
5342         for (i = 0; i < ud->bchan_cnt; i++) {
5343                 struct udma_bchan *bchan = &ud->bchans[i];
5344
5345                 bchan->id = i;
5346                 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
5347         }
5348
5349         for (i = 0; i < ud->tchan_cnt; i++) {
5350                 struct udma_tchan *tchan = &ud->tchans[i];
5351
5352                 tchan->id = i;
5353                 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
5354         }
5355
5356         for (i = 0; i < ud->rchan_cnt; i++) {
5357                 struct udma_rchan *rchan = &ud->rchans[i];
5358
5359                 rchan->id = i;
5360                 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
5361         }
5362
5363         for (i = 0; i < ud->rflow_cnt; i++) {
5364                 struct udma_rflow *rflow = &ud->rflows[i];
5365
5366                 rflow->id = i;
5367         }
5368
5369         for (i = 0; i < ch_count; i++) {
5370                 struct udma_chan *uc = &ud->channels[i];
5371
5372                 uc->ud = ud;
5373                 uc->vc.desc_free = udma_desc_free;
5374                 uc->id = i;
5375                 uc->bchan = NULL;
5376                 uc->tchan = NULL;
5377                 uc->rchan = NULL;
5378                 uc->config.remote_thread_id = -1;
5379                 uc->config.mapped_channel_id = -1;
5380                 uc->config.default_flow_id = -1;
5381                 uc->config.dir = DMA_MEM_TO_MEM;
5382                 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
5383                                           dev_name(dev), i);
5384
5385                 vchan_init(&uc->vc, &ud->ddev);
5386                 /* Use custom vchan completion handling */
5387                 tasklet_setup(&uc->vc.task, udma_vchan_complete);
5388                 init_completion(&uc->teardown_completed);
5389                 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
5390         }
5391
	/* Configure copy_align to match the maximum burst size the device supports */
5393         ud->ddev.copy_align = udma_get_copy_align(ud);
5394
5395         ret = dma_async_device_register(&ud->ddev);
5396         if (ret) {
5397                 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
5398                 return ret;
5399         }
5400
5401         platform_set_drvdata(pdev, ud);
5402
5403         ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
5404         if (ret) {
5405                 dev_err(dev, "failed to register of_dma controller\n");
5406                 dma_async_device_unregister(&ud->ddev);
5407         }
5408
5409         return ret;
5410 }
5411
5412 static struct platform_driver udma_driver = {
5413         .driver = {
5414                 .name   = "ti-udma",
5415                 .of_match_table = udma_of_match,
5416                 .suppress_bind_attrs = true,
5417         },
5418         .probe          = udma_probe,
5419 };
5420 builtin_platform_driver(udma_driver);
5421
5422 static struct platform_driver bcdma_driver = {
5423         .driver = {
5424                 .name   = "ti-bcdma",
5425                 .of_match_table = bcdma_of_match,
5426                 .suppress_bind_attrs = true,
5427         },
5428         .probe          = udma_probe,
5429 };
5430 builtin_platform_driver(bcdma_driver);
5431
5432 static struct platform_driver pktdma_driver = {
5433         .driver = {
5434                 .name   = "ti-pktdma",
5435                 .of_match_table = pktdma_of_match,
5436                 .suppress_bind_attrs = true,
5437         },
5438         .probe          = udma_probe,
5439 };
5440 builtin_platform_driver(pktdma_driver);
5441
5442 /* Private interfaces to UDMA */
5443 #include "k3-udma-private.c"