dmaengine: ti: k3-udma-glue: move psi-l pairing in channel en/dis functions
drivers/dma/ti/k3-udma-glue.c
// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
        struct device *dev;
        struct udma_dev *udmax;
        const struct udma_tisci_rm *tisci_rm;
        struct k3_ringacc *ringacc;
        u32 src_thread;
        u32 dst_thread;

        u32  hdesc_size;
        bool epib;
        u32  psdata_size;
        u32  swdata_size;
        u32  atype;
};

struct k3_udma_glue_tx_channel {
        struct k3_udma_glue_common common;

        struct udma_tchan *udma_tchanx;
        int udma_tchan_id;

        struct k3_ring *ringtx;
        struct k3_ring *ringtxcq;

        bool psil_paired;

        int virq;

        atomic_t free_pkts;
        bool tx_pause_on_err;
        bool tx_filt_einfo;
        bool tx_filt_pswords;
        bool tx_supr_tdpkt;
};

struct k3_udma_glue_rx_flow {
        struct udma_rflow *udma_rflow;
        int udma_rflow_id;
        struct k3_ring *ringrx;
        struct k3_ring *ringrxfdq;

        int virq;
};

struct k3_udma_glue_rx_channel {
        struct k3_udma_glue_common common;

        struct udma_rchan *udma_rchanx;
        int udma_rchan_id;
        bool remote;

        bool psil_paired;

        u32  swdata_size;
        int  flow_id_base;

        struct k3_udma_glue_rx_flow *flows;
        u32 flow_num;
        u32 flows_ready;
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
                                 struct k3_udma_glue_common *common)
{
        common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
                                                       "ti,ringacc");
        if (IS_ERR(common->ringacc))
                return PTR_ERR(common->ringacc);

        common->udmax = of_xudma_dev_get(udmax_np, NULL);
        if (IS_ERR(common->udmax))
                return PTR_ERR(common->udmax);

        common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

        return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
                const char *name, struct k3_udma_glue_common *common,
                bool tx_chn)
{
        struct psil_endpoint_config *ep_config;
        struct of_phandle_args dma_spec;
        u32 thread_id;
        int ret = 0;
        int index;

        if (unlikely(!name))
                return -EINVAL;

        index = of_property_match_string(chn_np, "dma-names", name);
        if (index < 0)
                return index;

        if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
                                       &dma_spec))
                return -ENOENT;

        thread_id = dma_spec.args[0];
        if (dma_spec.args_count == 2) {
                if (dma_spec.args[1] > 2) {
                        dev_err(common->dev, "Invalid channel atype: %u\n",
                                dma_spec.args[1]);
                        ret = -EINVAL;
                        goto out_put_spec;
                }
                common->atype = dma_spec.args[1];
        }

        if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
                ret = -EINVAL;
                goto out_put_spec;
        }

        if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
                ret = -EINVAL;
                goto out_put_spec;
        }

        /* get psil endpoint config */
        ep_config = psil_get_ep_config(thread_id);
        if (IS_ERR(ep_config)) {
                dev_err(common->dev,
                        "No configuration for psi-l thread 0x%04x\n",
                        thread_id);
                ret = PTR_ERR(ep_config);
                goto out_put_spec;
        }

        common->epib = ep_config->needs_epib;
        common->psdata_size = ep_config->psd_size;

        if (tx_chn)
                common->dst_thread = thread_id;
        else
                common->src_thread = thread_id;

        ret = of_k3_udma_glue_parse(dma_spec.np, common);

out_put_spec:
        of_node_put(dma_spec.np);
        return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        struct device *dev = tx_chn->common.dev;

        dev_dbg(dev, "dump_tx_chn:\n"
                "udma_tchan_id: %d\n"
                "src_thread: %08x\n"
                "dst_thread: %08x\n",
                tx_chn->udma_tchan_id,
                tx_chn->common.src_thread,
                tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
                                        char *mark)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "=== dump ===> %s\n", mark);
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
                xudma_tchanrt_read(chn->udma_tchanx,
                                   UDMA_CHAN_RT_PEER_RT_EN_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
        struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

        memset(&req, 0, sizeof(req));

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = tx_chn->udma_tchan_id;
        if (tx_chn->tx_pause_on_err)
                req.tx_pause_on_err = 1;
        if (tx_chn->tx_filt_einfo)
                req.tx_filt_einfo = 1;
        if (tx_chn->tx_filt_pswords)
                req.tx_filt_pswords = 1;
        req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        if (tx_chn->tx_supr_tdpkt)
                req.tx_supr_tdpkt = 1;
        req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
        req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
        req.tx_atype = tx_chn->common.atype;

        return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
                const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
        struct k3_udma_glue_tx_channel *tx_chn;
        int ret;

        tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
        if (!tx_chn)
                return ERR_PTR(-ENOMEM);

        tx_chn->common.dev = dev;
        tx_chn->common.swdata_size = cfg->swdata_size;
        tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
        tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
        tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
        tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

        /* parse the udmap channel from DT */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &tx_chn->common, true);
        if (ret)
                goto err;

        tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
                                                tx_chn->common.psdata_size,
                                                tx_chn->common.swdata_size);

        /* request and cfg UDMAP TX channel */
        tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
        if (IS_ERR(tx_chn->udma_tchanx)) {
                ret = PTR_ERR(tx_chn->udma_tchanx);
                dev_err(dev, "UDMAX tchanx get err %d\n", ret);
                goto err;
        }
        tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

        atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

        /* request and cfg rings */
        ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
                                            tx_chn->udma_tchan_id, -1,
                                            &tx_chn->ringtx,
                                            &tx_chn->ringtxcq);
        if (ret) {
                dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
                goto err;
        }

        ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringtx %d\n", ret);
                goto err;
        }

        ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
                goto err;
        }

        /* request and cfg psi-l */
        tx_chn->common.src_thread =
                        xudma_dev_get_psil_base(tx_chn->common.udmax) +
                        tx_chn->udma_tchan_id;

        ret = k3_udma_glue_cfg_tx_chn(tx_chn);
        if (ret) {
                dev_err(dev, "Failed to cfg tchan %d\n", ret);
                goto err;
        }

        k3_udma_glue_dump_tx_chn(tx_chn);

        return tx_chn;

err:
        k3_udma_glue_release_tx_chn(tx_chn);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
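
/*
 * Example (hypothetical client code, not part of this file): requesting a
 * TX channel from a client driver's probe. The "tx0" dma-names entry and
 * the ring sizes are illustrative assumptions; the k3_udma_glue_* call
 * and the cfg fields are the ones defined above.
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.tx_cfg.size = 128;
 *	cfg.txcq_cfg.size = 128;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 */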

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        if (tx_chn->psil_paired) {
                xudma_navss_psil_unpair(tx_chn->common.udmax,
                                        tx_chn->common.src_thread,
                                        tx_chn->common.dst_thread);
                tx_chn->psil_paired = false;
        }

        if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
                xudma_tchan_put(tx_chn->common.udmax,
                                tx_chn->udma_tchanx);

        if (tx_chn->ringtxcq)
                k3_ringacc_ring_free(tx_chn->ringtxcq);

        if (tx_chn->ringtx)
                k3_ringacc_ring_free(tx_chn->ringtx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                             struct cppi5_host_desc_t *desc_tx,
                             dma_addr_t desc_dma)
{
        u32 ringtxcq_id;

        if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
                return -ENOMEM;

        ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
        cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

        return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                            dma_addr_t *desc_dma)
{
        int ret;

        ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
        if (!ret)
                atomic_inc(&tx_chn->free_pkts);

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
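
/*
 * Example (hypothetical) hot-path usage of the push/pop pair: a cppi5
 * host descriptor is pushed together with its DMA address, and on a TXCQ
 * ring interrupt the completed addresses are popped back and matched to
 * the client's own bookkeeping. my_complete_tx() and "priv" are assumed
 * client-side names.
 *
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
 *	...
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
 *		my_complete_tx(priv, desc_dma);
 */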

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        int ret;

        ret = xudma_navss_psil_pair(tx_chn->common.udmax,
                                    tx_chn->common.src_thread,
                                    tx_chn->common.dst_thread);
        if (ret) {
                dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
                return ret;
        }

        tx_chn->psil_paired = true;

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE);

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN);

        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
        return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);
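
/*
 * Note: with this change the PSI-L pairing is done here at enable time
 * rather than at channel request time; disable does the unpairing. The
 * client simply brackets active use with:
 *
 *	ret = k3_udma_glue_enable_tx_chn(tx_chn);
 *	...
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 */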

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

        xudma_tchanrt_write(tx_chn->udma_tchanx,
                            UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

        if (tx_chn->psil_paired) {
                xudma_navss_psil_unpair(tx_chn->common.udmax,
                                        tx_chn->common.src_thread,
                                        tx_chn->common.dst_thread);
                tx_chn->psil_paired = false;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                               bool sync)
{
        int i = 0;
        u32 val;

        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

        val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = xudma_tchanrt_read(tx_chn->udma_tchanx,
                                         UDMA_CHAN_RT_CTL_REG);
                udelay(1);
                if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
                        dev_err(tx_chn->common.dev, "TX tdown timeout\n");
                        break;
                }
                i++;
        }

        val = xudma_tchanrt_read(tx_chn->udma_tchanx,
                                 UDMA_CHAN_RT_PEER_RT_EN_REG);
        if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
                dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
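
/*
 * One plausible client shutdown sequence (hypothetical, for illustration):
 * tear the channel down synchronously, drain the TX ring with a cleanup
 * callback, then disable (which also unpairs PSI-L) and release. priv and
 * my_tx_cleanup() are assumed client-side names.
 *
 *	k3_udma_glue_tdown_tx_chn(tx_chn, true);
 *	k3_udma_glue_reset_tx_chn(tx_chn, priv, my_tx_cleanup);
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 *	k3_udma_glue_release_tx_chn(tx_chn);
 */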

void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                               void *data,
                               void (*cleanup)(void *data, dma_addr_t desc_dma))
{
        dma_addr_t desc_dma;
        int occ_tx, i, ret;

        /* reset TXCQ as it is not input for udma - expected to be empty */
        if (tx_chn->ringtxcq)
                k3_ringacc_ring_reset(tx_chn->ringtxcq);

        /*
         * The TXQ reset needs to be done in a special way as it is input
         * for udma and its state is cached by udma, so:
         * 1) save TXQ occ
         * 2) clean up TXQ and call the .cleanup() callback for each desc
         * 3) reset TXQ in a special way
         */
        occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
        dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);

        for (i = 0; i < occ_tx; i++) {
                ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
                if (ret) {
                        dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
                        break;
                }
                cleanup(data, desc_dma);
        }

        k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
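
/*
 * Example (hypothetical) cleanup callback for the reset above: the glue
 * layer only hands back the DMA address of each stuck descriptor, so the
 * client maps it back to its own descriptor bookkeeping and frees it.
 * struct my_priv, struct my_desc and the my_* helpers are assumed names.
 *
 *	static void my_tx_cleanup(void *data, dma_addr_t desc_dma)
 *	{
 *		struct my_priv *priv = data;
 *		struct my_desc *d = my_desc_from_dma(priv, desc_dma);
 *
 *		my_desc_unmap_and_free(priv, d);
 *	}
 */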

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
        return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
        return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
        tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);

        return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
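
/*
 * Example (hypothetical): the returned virq is the TXCQ ring interrupt
 * and can be requested like any other IRQ. my_tx_irq_handler and "priv"
 * are assumed client-side names.
 *
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	if (irq <= 0)
 *		return irq ? irq : -ENXIO;
 *
 *	ret = devm_request_irq(dev, irq, my_tx_irq_handler, 0,
 *			       "my-tx", priv);
 */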

static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
        int ret;

        memset(&req, 0, sizeof(req));

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = rx_chn->udma_rchan_id;
        req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
        /*
         * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
         * and udmax impl, so just configure it to invalid value.
         * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
         */
        req.rxcq_qnum = 0xFFFF;
        if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
                /* Default flow + extra ones */
                req.flowid_start = rx_chn->flow_id_base;
                req.flowid_cnt = rx_chn->flow_num;
        }
        req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        req.rx_atype = rx_chn->common.atype;

        ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
        if (ret)
                dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
                        rx_chn->udma_rchan_id, ret);

        return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                                         u32 flow_num)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        if (IS_ERR_OR_NULL(flow->udma_rflow))
                return;

        if (flow->ringrxfdq)
                k3_ringacc_ring_free(flow->ringrxfdq);

        if (flow->ringrx)
                k3_ringacc_ring_free(flow->ringrx);

        xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
        flow->udma_rflow = NULL;
        rx_chn->flows_ready--;
}

static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_idx,
                                    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int rx_ring_id;
        int rx_ringfdq_id;
        int ret = 0;

        flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
                                           flow->udma_rflow_id);
        if (IS_ERR(flow->udma_rflow)) {
                ret = PTR_ERR(flow->udma_rflow);
                dev_err(dev, "UDMAX rflow get err %d\n", ret);
                return ret;
        }

        if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
                ret = -ENODEV;
                goto err_rflow_put;
        }

        /* request and cfg rings */
        ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
                                            flow_cfg->ring_rxfdq0_id,
                                            flow_cfg->ring_rxq_id,
                                            &flow->ringrxfdq,
                                            &flow->ringrx);
        if (ret) {
                dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
                goto err_rflow_put;
        }

        ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrx %d\n", ret);
                goto err_ringrxfdq_free;
        }

        ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
                goto err_ringrxfdq_free;
        }

        if (rx_chn->remote) {
                rx_ring_id = TI_SCI_RESOURCE_NULL;
                rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
        } else {
                rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
                rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
        }

        memset(&req, 0, sizeof(req));

        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        if (rx_chn->common.epib)
                req.rx_einfo_present = 1;
        if (rx_chn->common.psdata_size)
                req.rx_psinfo_present = 1;
        if (flow_cfg->rx_error_handling)
                req.rx_error_handling = 1;
        req.rx_desc_type = 0;
        req.rx_dest_qnum = rx_ring_id;
        req.rx_src_tag_hi_sel = 0;
        req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
        req.rx_dest_tag_hi_sel = 0;
        req.rx_dest_tag_lo_sel = 0;
        req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
        req.rx_fdq1_qnum = rx_ringfdq_id;
        req.rx_fdq2_qnum = rx_ringfdq_id;
        req.rx_fdq3_qnum = rx_ringfdq_id;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
                        ret);
                goto err_ringrxfdq_free;
        }

        rx_chn->flows_ready++;
        dev_dbg(dev, "flow%d config done. ready:%d\n",
                flow->udma_rflow_id, rx_chn->flows_ready);

        return 0;

err_ringrxfdq_free:
        k3_ringacc_ring_free(flow->ringrxfdq);
        k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
        xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
        flow->udma_rflow = NULL;

        return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "dump_rx_chn:\n"
                "udma_rchan_id: %d\n"
                "src_thread: %08x\n"
                "dst_thread: %08x\n"
                "epib: %d\n"
                "hdesc_size: %u\n"
                "psdata_size: %u\n"
                "swdata_size: %u\n"
                "flow_id_base: %d\n"
                "flow_num: %d\n",
                chn->udma_rchan_id,
                chn->common.src_thread,
                chn->common.dst_thread,
                chn->common.epib,
                chn->common.hdesc_size,
                chn->common.psdata_size,
                chn->common.swdata_size,
                chn->flow_id_base,
                chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
                                        char *mark)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "=== dump ===> %s\n", mark);

        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
                xudma_rchanrt_read(chn->udma_rchanx,
                                   UDMA_CHAN_RT_PEER_RT_EN_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
                               struct k3_udma_glue_rx_channel_cfg *cfg)
{
        int ret;

        /* default rflow */
        if (cfg->flow_id_use_rxchan_id)
                return 0;

        /* not GP rflows */
        if (rx_chn->flow_id_base != -1 &&
            !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
                return 0;

        /* Allocate range of GP rflows */
        ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
                                         rx_chn->flow_id_base,
                                         rx_chn->flow_num);
        if (ret < 0) {
                dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
                        rx_chn->flow_id_base, rx_chn->flow_num, ret);
                return ret;
        }
        rx_chn->flow_id_base = ret;

        return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
                                 struct k3_udma_glue_rx_channel_cfg *cfg)
{
        struct k3_udma_glue_rx_channel *rx_chn;
        int ret, i;

        if (cfg->flow_id_num <= 0)
                return ERR_PTR(-EINVAL);

        if (cfg->flow_id_num != 1 &&
            (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
                return ERR_PTR(-EINVAL);

        rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
        if (!rx_chn)
                return ERR_PTR(-ENOMEM);

        rx_chn->common.dev = dev;
        rx_chn->common.swdata_size = cfg->swdata_size;
        rx_chn->remote = false;

        /* parse the udmap channel from DT */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &rx_chn->common, false);
        if (ret)
                goto err;

        rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
                                                rx_chn->common.psdata_size,
                                                rx_chn->common.swdata_size);

        /* request and cfg UDMAP RX channel */
        rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
        if (IS_ERR(rx_chn->udma_rchanx)) {
                ret = PTR_ERR(rx_chn->udma_rchanx);
                dev_err(dev, "UDMAX rchanx get err %d\n", ret);
                goto err;
        }
        rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

        rx_chn->flow_num = cfg->flow_id_num;
        rx_chn->flow_id_base = cfg->flow_id_base;

        /* Use RX channel id as flow id: target dev can't generate flow_id */
        if (cfg->flow_id_use_rxchan_id)
                rx_chn->flow_id_base = rx_chn->udma_rchan_id;

        rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
                                     sizeof(*rx_chn->flows), GFP_KERNEL);
        if (!rx_chn->flows) {
                ret = -ENOMEM;
                goto err;
        }

        ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
        if (ret)
                goto err;

        for (i = 0; i < rx_chn->flow_num; i++)
                rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

        /* request and cfg psi-l */
        rx_chn->common.dst_thread =
                        xudma_dev_get_psil_base(rx_chn->common.udmax) +
                        rx_chn->udma_rchan_id;

        ret = k3_udma_glue_cfg_rx_chn(rx_chn);
        if (ret) {
                dev_err(dev, "Failed to cfg rchan %d\n", ret);
                goto err;
        }

        /* init default RX flow only if flow_num == 1 */
        if (cfg->def_flow_cfg) {
                ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
                if (ret)
                        goto err;
        }

        k3_udma_glue_dump_rx_chn(rx_chn);

        return rx_chn;

err:
        k3_udma_glue_release_rx_chn(rx_chn);
        return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
                                   struct k3_udma_glue_rx_channel_cfg *cfg)
{
        struct k3_udma_glue_rx_channel *rx_chn;
        int ret, i;

        if (cfg->flow_id_num <= 0 ||
            cfg->flow_id_use_rxchan_id ||
            cfg->def_flow_cfg ||
            cfg->flow_id_base < 0)
                return ERR_PTR(-EINVAL);

        /*
         * A remote RX channel is under the control of a remote CPU core, so
         * Linux can only request it and manipulate it through its dedicated
         * RX flows.
         */

        rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
        if (!rx_chn)
                return ERR_PTR(-ENOMEM);

        rx_chn->common.dev = dev;
        rx_chn->common.swdata_size = cfg->swdata_size;
        rx_chn->remote = true;
        rx_chn->udma_rchan_id = -1;
        rx_chn->flow_num = cfg->flow_id_num;
        rx_chn->flow_id_base = cfg->flow_id_base;
        rx_chn->psil_paired = false;

        /* parse the udmap channel from DT */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &rx_chn->common, false);
        if (ret)
                goto err;

        rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
                                                rx_chn->common.psdata_size,
                                                rx_chn->common.swdata_size);

        rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
                                     sizeof(*rx_chn->flows), GFP_KERNEL);
        if (!rx_chn->flows) {
                ret = -ENOMEM;
                goto err;
        }

        ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
        if (ret)
                goto err;

        for (i = 0; i < rx_chn->flow_num; i++)
                rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

        k3_udma_glue_dump_rx_chn(rx_chn);

        return rx_chn;

err:
        k3_udma_glue_release_rx_chn(rx_chn);
        return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
                            struct k3_udma_glue_rx_channel_cfg *cfg)
{
        if (cfg->remote)
                return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
        else
                return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
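
/*
 * Example (hypothetical): requesting a local RX channel with one default
 * flow. The "rx0" dma-names entry, ring sizes and flag choices are
 * illustrative assumptions; the cfg fields are the ones used above.
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
 *
 *	flow_cfg.rx_cfg.size = 128;
 *	flow_cfg.rxfdq_cfg.size = 128;
 *	flow_cfg.ring_rxq_id = -1;
 *	flow_cfg.ring_rxfdq0_id = -1;
 *	flow_cfg.rx_error_handling = true;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.flow_id_base = -1;
 *	cfg.flow_id_num = 1;
 *	cfg.def_flow_cfg = &flow_cfg;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 */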

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        int i;

        if (IS_ERR_OR_NULL(rx_chn->common.udmax))
                return;

        if (rx_chn->psil_paired) {
                xudma_navss_psil_unpair(rx_chn->common.udmax,
                                        rx_chn->common.src_thread,
                                        rx_chn->common.dst_thread);
                rx_chn->psil_paired = false;
        }

        for (i = 0; i < rx_chn->flow_num; i++)
                k3_udma_glue_release_rx_flow(rx_chn, i);

        if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
                xudma_free_gp_rflow_range(rx_chn->common.udmax,
                                          rx_chn->flow_id_base,
                                          rx_chn->flow_num);

        if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
                xudma_rchan_put(rx_chn->common.udmax,
                                rx_chn->udma_rchanx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
                              u32 flow_idx,
                              struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
        if (flow_idx >= rx_chn->flow_num)
                return -EINVAL;

        return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
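
/*
 * Example (hypothetical): when flow_id_num > 1 no default flow can be
 * configured at request time, so each flow is initialized individually
 * before the channel is enabled. num_flows and flow_cfg[] are assumed
 * client-side names.
 *
 *	for (i = 0; i < num_flows; i++) {
 *		ret = k3_udma_glue_rx_flow_init(rx_chn, i, &flow_cfg[i]);
 *		if (ret)
 *			goto err;
 *	}
 */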

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow;

        if (flow_idx >= rx_chn->flow_num)
                return -EINVAL;

        flow = &rx_chn->flows[flow_idx];

        return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
        return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
                                u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int rx_ring_id;
        int rx_ringfdq_id;
        int ret = 0;

        if (!rx_chn->remote)
                return -EINVAL;

        rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
        rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

        memset(&req, 0, sizeof(req));

        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        req.rx_dest_qnum = rx_ring_id;
        req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
        req.rx_fdq1_qnum = rx_ringfdq_id;
        req.rx_fdq2_qnum = rx_ringfdq_id;
        req.rx_fdq3_qnum = rx_ringfdq_id;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
                        ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
                                 u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int ret = 0;

        if (!rx_chn->remote)
                return -EINVAL;

        memset(&req, 0, sizeof(req));
        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
                        ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        int ret;

        if (rx_chn->remote)
                return -EINVAL;

        if (rx_chn->flows_ready < rx_chn->flow_num)
                return -EINVAL;

        ret = xudma_navss_psil_pair(rx_chn->common.udmax,
                                    rx_chn->common.src_thread,
                                    rx_chn->common.dst_thread);
        if (ret) {
                dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
                return ret;
        }

        rx_chn->psil_paired = true;

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN);

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE);

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
        return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
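
/*
 * Note: k3_udma_glue_enable_rx_chn() refuses to run until every flow has
 * been initialized (flows_ready == flow_num) and, mirroring the TX side,
 * now performs the PSI-L pairing itself; disable does the unpairing:
 *
 *	ret = k3_udma_glue_enable_rx_chn(rx_chn);
 *	...
 *	k3_udma_glue_disable_rx_chn(rx_chn);
 */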

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

        xudma_rchanrt_write(rx_chn->udma_rchanx,
                            UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

        if (rx_chn->psil_paired) {
                xudma_navss_psil_unpair(rx_chn->common.udmax,
                                        rx_chn->common.src_thread,
                                        rx_chn->common.dst_thread);
                rx_chn->psil_paired = false;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                               bool sync)
{
        int i = 0;
        u32 val;

        if (rx_chn->remote)
                return;

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

        val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = xudma_rchanrt_read(rx_chn->udma_rchanx,
                                         UDMA_CHAN_RT_CTL_REG);
                udelay(1);
                if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
                        dev_err(rx_chn->common.dev, "RX tdown timeout\n");
                        break;
                }
                i++;
        }

        val = xudma_rchanrt_read(rx_chn->udma_rchanx,
                                 UDMA_CHAN_RT_PEER_RT_EN_REG);
        if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
                dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                u32 flow_num, void *data,
                void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
        struct device *dev = rx_chn->common.dev;
        dma_addr_t desc_dma;
        int occ_rx, i, ret;

        /* reset RXCQ as it is not input for udma - expected to be empty */
        occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
        dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
        if (flow->ringrx)
                k3_ringacc_ring_reset(flow->ringrx);

        /* Skip RX FDQ in case one FDQ is used for the set of flows */
        if (skip_fdq)
                return;

        /*
         * The RX FDQ reset needs to be done in a special way as it is input
         * for udma and its state is cached by udma, so:
         * 1) save RX FDQ occ
         * 2) clean up RX FDQ and call the .cleanup() callback for each desc
         * 3) reset RX FDQ in a special way
         */
        occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
        dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

        for (i = 0; i < occ_rx; i++) {
                ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
                if (ret) {
                        dev_err(dev, "RX reset pop %d\n", ret);
                        break;
                }
                cleanup(data, desc_dma);
        }

        k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
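
/*
 * Example (hypothetical) per-flow drain during shutdown: each flow is
 * reset with a client cleanup callback; skip_fdq may be set for all but
 * one flow when several flows share a single FDQ. num_flows, priv and
 * my_rx_cleanup() are assumed client-side names.
 *
 *	for (i = 0; i < num_flows; i++)
 *		k3_udma_glue_reset_rx_chn(rx_chn, i, priv, my_rx_cleanup,
 *					  false);
 */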

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                             u32 flow_num, struct cppi5_host_desc_t *desc_rx,
                             dma_addr_t desc_dma)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                            u32 flow_num, dma_addr_t *desc_dma)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
                            u32 flow_num)
{
        struct k3_udma_glue_rx_flow *flow;

        flow = &rx_chn->flows[flow_num];

        flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);

        return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
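
/*
 * Example (hypothetical): there is one ring interrupt per flow, requested
 * by flow index. my_rx_irq_handler and "priv" are assumed names.
 *
 *	irq = k3_udma_glue_rx_get_irq(rx_chn, flow_idx);
 *	if (irq <= 0)
 *		return irq ? irq : -ENXIO;
 *
 *	ret = devm_request_irq(dev, irq, my_rx_irq_handler, 0,
 *			       "my-rx", priv);
 */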