// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
        struct device *dev;
        struct udma_dev *udmax;
        const struct udma_tisci_rm *tisci_rm;
        struct k3_ringacc *ringacc;
        u32 src_thread;
        u32 dst_thread;

        u32  hdesc_size;
        bool epib;
        u32  psdata_size;
        u32  swdata_size;
        u32  atype;
};

struct k3_udma_glue_tx_channel {
        struct k3_udma_glue_common common;

        struct udma_tchan *udma_tchanx;
        int udma_tchan_id;

        struct k3_ring *ringtx;
        struct k3_ring *ringtxcq;

        bool psil_paired;

        int virq;

        atomic_t free_pkts;
        bool tx_pause_on_err;
        bool tx_filt_einfo;
        bool tx_filt_pswords;
        bool tx_supr_tdpkt;
};

struct k3_udma_glue_rx_flow {
        struct udma_rflow *udma_rflow;
        int udma_rflow_id;
        struct k3_ring *ringrx;
        struct k3_ring *ringrxfdq;

        int virq;
};

struct k3_udma_glue_rx_channel {
        struct k3_udma_glue_common common;

        struct udma_rchan *udma_rchanx;
        int udma_rchan_id;
        bool remote;

        bool psil_paired;

        u32  swdata_size;
        int  flow_id_base;

        struct k3_udma_glue_rx_flow *flows;
        u32 flow_num;
        u32 flows_ready;
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
                                 struct k3_udma_glue_common *common)
{
        common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
                                                       "ti,ringacc");
        if (IS_ERR(common->ringacc))
                return PTR_ERR(common->ringacc);

        common->udmax = of_xudma_dev_get(udmax_np, NULL);
        if (IS_ERR(common->udmax))
                return PTR_ERR(common->udmax);

        common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

        return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
                const char *name, struct k3_udma_glue_common *common,
                bool tx_chn)
{
        struct psil_endpoint_config *ep_config;
        struct of_phandle_args dma_spec;
        u32 thread_id;
        int ret = 0;
        int index;

        if (unlikely(!name))
                return -EINVAL;

        index = of_property_match_string(chn_np, "dma-names", name);
        if (index < 0)
                return index;

        if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
                                       &dma_spec))
                return -ENOENT;

        thread_id = dma_spec.args[0];
        if (dma_spec.args_count == 2) {
                if (dma_spec.args[1] > 2) {
                        dev_err(common->dev, "Invalid channel atype: %u\n",
                                dma_spec.args[1]);
                        ret = -EINVAL;
                        goto out_put_spec;
                }
                common->atype = dma_spec.args[1];
        }

        if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
                ret = -EINVAL;
                goto out_put_spec;
        }

        if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
                ret = -EINVAL;
                goto out_put_spec;
        }

        /* get psil endpoint config */
        ep_config = psil_get_ep_config(thread_id);
        if (IS_ERR(ep_config)) {
                dev_err(common->dev,
                        "No configuration for psi-l thread 0x%04x\n",
                        thread_id);
                ret = PTR_ERR(ep_config);
                goto out_put_spec;
        }

        common->epib = ep_config->needs_epib;
        common->psdata_size = ep_config->psd_size;

        if (tx_chn)
                common->dst_thread = thread_id;
        else
                common->src_thread = thread_id;

        ret = of_k3_udma_glue_parse(dma_spec.np, common);

out_put_spec:
        of_node_put(dma_spec.np);
        return ret;
}

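/*
 * Illustrative device tree usage for the parsing above (a sketch, not taken
 * from this file): a client node references the UDMA-P controller through
 * "dmas"/"dma-names", with the PSI-L thread ID in the first cell and, when
 * the controller has #dma-cells = <2>, the address type (atype, 0..2) in the
 * second cell. The node names and thread IDs below are hypothetical:
 *
 *      my_client: ethernet@46000000 {
 *              dmas = <&main_udmap 0xc000 0>, <&main_udmap 0x4000 0>;
 *              dma-names = "tx0", "rx0";
 *      };
 *
 * TX threads carry K3_PSIL_DST_THREAD_ID_OFFSET (bit 15) in the thread ID,
 * RX threads do not, which is what the tx_chn/!tx_chn checks above enforce.
 */
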
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        struct device *dev = tx_chn->common.dev;

        dev_dbg(dev, "dump_tx_chn:\n"
                "udma_tchan_id: %d\n"
                "src_thread: %08x\n"
                "dst_thread: %08x\n",
                tx_chn->udma_tchan_id,
                tx_chn->common.src_thread,
                tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
                                        char *mark)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "=== dump ===> %s\n", mark);
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
                xudma_tchanrt_read(chn->udma_tchanx,
                                   UDMA_CHAN_RT_PEER_RT_EN_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
        struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

        memset(&req, 0, sizeof(req));

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = tx_chn->udma_tchan_id;
        if (tx_chn->tx_pause_on_err)
                req.tx_pause_on_err = 1;
        if (tx_chn->tx_filt_einfo)
                req.tx_filt_einfo = 1;
        if (tx_chn->tx_filt_pswords)
                req.tx_filt_pswords = 1;
        req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        if (tx_chn->tx_supr_tdpkt)
                req.tx_supr_tdpkt = 1;
        req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
        req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
        req.tx_atype = tx_chn->common.atype;

        return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
                const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
        struct k3_udma_glue_tx_channel *tx_chn;
        int ret;

        tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
        if (!tx_chn)
                return ERR_PTR(-ENOMEM);

        tx_chn->common.dev = dev;
        tx_chn->common.swdata_size = cfg->swdata_size;
        tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
        tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
        tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
        tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

        /* parse the udmap channel from DT */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &tx_chn->common, true);
        if (ret)
                goto err;

        tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
                                                tx_chn->common.psdata_size,
                                                tx_chn->common.swdata_size);

        /* request and cfg UDMAP TX channel */
        tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
        if (IS_ERR(tx_chn->udma_tchanx)) {
                ret = PTR_ERR(tx_chn->udma_tchanx);
                dev_err(dev, "UDMAX tchanx get err %d\n", ret);
                goto err;
        }
        tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

        atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

        /* request and cfg rings */
        ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
                                            tx_chn->udma_tchan_id, -1,
                                            &tx_chn->ringtx,
                                            &tx_chn->ringtxcq);
        if (ret) {
                dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
                goto err;
        }

        ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringtx %d\n", ret);
                goto err;
        }

        ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
                goto err;
        }

        /* request and cfg psi-l */
        tx_chn->common.src_thread =
                        xudma_dev_get_psil_base(tx_chn->common.udmax) +
                        tx_chn->udma_tchan_id;

        ret = k3_udma_glue_cfg_tx_chn(tx_chn);
        if (ret) {
                dev_err(dev, "Failed to cfg tchan %d\n", ret);
                goto err;
        }

        ret = xudma_navss_psil_pair(tx_chn->common.udmax,
                                    tx_chn->common.src_thread,
                                    tx_chn->common.dst_thread);
        if (ret) {
                dev_err(dev, "PSI-L request err %d\n", ret);
                goto err;
        }

        tx_chn->psil_paired = true;

        /* reset TX RT registers */
        k3_udma_glue_disable_tx_chn(tx_chn);

        k3_udma_glue_dump_tx_chn(tx_chn);

        return tx_chn;

err:
        k3_udma_glue_release_tx_chn(tx_chn);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);

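/*
 * Minimal usage sketch for k3_udma_glue_request_tx_chn() above. This is
 * illustrative only: the "tx0" channel name, ring sizes and swdata size are
 * hypothetical values a client driver would pick for itself.
 *
 *      struct k3_udma_glue_tx_channel_cfg cfg = { };
 *      struct k3_udma_glue_tx_channel *tx_chn;
 *
 *      cfg.swdata_size = 16;
 *      cfg.tx_cfg.size = 128;
 *      cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *      cfg.txcq_cfg.size = 128;
 *      cfg.txcq_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *
 *      tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *      if (IS_ERR(tx_chn))
 *              return PTR_ERR(tx_chn);
 */
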
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        if (tx_chn->psil_paired) {
                xudma_navss_psil_unpair(tx_chn->common.udmax,
                                        tx_chn->common.src_thread,
                                        tx_chn->common.dst_thread);
                tx_chn->psil_paired = false;
        }

        if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
                xudma_tchan_put(tx_chn->common.udmax,
                                tx_chn->udma_tchanx);

        if (tx_chn->ringtxcq)
                k3_ringacc_ring_free(tx_chn->ringtxcq);

        if (tx_chn->ringtx)
                k3_ringacc_ring_free(tx_chn->ringtx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                             struct cppi5_host_desc_t *desc_tx,
                             dma_addr_t desc_dma)
{
        u32 ringtxcq_id;

        if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
                return -ENOMEM;

        ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
        cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

        return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                            dma_addr_t *desc_dma)
{
        int ret;

        ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
        if (!ret)
                atomic_inc(&tx_chn->free_pkts);

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

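/*
 * Typical TX datapath built on the push/pop helpers above (a sketch; the
 * descriptor pool and the desc/desc_dma/buf_dma/len variables are
 * hypothetical and owned by the client driver). On submit:
 *
 *      cppi5_hdesc_init(desc, 0, 0);
 *      cppi5_hdesc_attach_buf(desc, buf_dma, len, buf_dma, len);
 *      ret = k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
 *
 * and in the TX completion handler:
 *
 *      while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma)) {
 *              // unmap the buffer and return the descriptor to the pool
 *      }
 *
 * push takes one packet from the free_pkts budget and points the descriptor
 * return queue at the TX completion ring; pop gives the budget back.
 */
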
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE);

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN);

        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
        return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

        xudma_tchanrt_write(tx_chn->udma_tchanx,
                            UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                               bool sync)
{
        int i = 0;
        u32 val;

        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

        val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = xudma_tchanrt_read(tx_chn->udma_tchanx,
                                         UDMA_CHAN_RT_CTL_REG);
                udelay(1);
                if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
                        dev_err(tx_chn->common.dev, "TX tdown timeout\n");
                        break;
                }
                i++;
        }

        val = xudma_tchanrt_read(tx_chn->udma_tchanx,
                                 UDMA_CHAN_RT_PEER_RT_EN_REG);
        if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
                dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

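/*
 * One possible TX shutdown order using the helpers in this file (a sketch;
 * real users may interleave queue/NAPI handling between these steps; priv
 * and my_tx_cleanup are hypothetical):
 *
 *      k3_udma_glue_tdown_tx_chn(tx_chn, true);
 *      k3_udma_glue_reset_tx_chn(tx_chn, priv, my_tx_cleanup);
 *      k3_udma_glue_disable_tx_chn(tx_chn);
 *      k3_udma_glue_release_tx_chn(tx_chn);
 *
 * See the reset helper below for what the cleanup callback is expected to do.
 */
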
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                               void *data,
                               void (*cleanup)(void *data, dma_addr_t desc_dma))
{
        dma_addr_t desc_dma;
        int occ_tx, i, ret;

        /* reset TXCQ as it is not an input for udma - expected to be empty */
        if (tx_chn->ringtxcq)
                k3_ringacc_ring_reset(tx_chn->ringtxcq);

        /*
         * TXQ reset needs to be done in a special way as it is an input for
         * udma and its state is cached by udma, so:
         * 1) save TXQ occ
         * 2) clean up TXQ and call the .cleanup() callback for each desc
         * 3) reset TXQ in a special way
         */
        occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
        dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);

        for (i = 0; i < occ_tx; i++) {
                ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
                if (ret) {
                        dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
                        break;
                }
                cleanup(data, desc_dma);
        }

        k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

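/*
 * The cleanup callback passed to k3_udma_glue_reset_tx_chn() above is called
 * once per descriptor still queued in the TX ring. A minimal sketch (the
 * priv structure and pool/unmap helpers are hypothetical):
 *
 *      static void my_tx_cleanup(void *data, dma_addr_t desc_dma)
 *      {
 *              struct my_priv *priv = data;
 *
 *              // look up the host descriptor by desc_dma, dma_unmap the
 *              // attached buffer, then return the descriptor to the pool
 *      }
 */
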
u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
        return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
        return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
        tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);

        return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

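/*
 * The value returned above is the TX completion ring's Linux virq; a client
 * would typically wire it up like this (handler name is hypothetical):
 *
 *      irq = k3_udma_glue_tx_get_irq(tx_chn);
 *      if (irq <= 0)
 *              return irq ? irq : -ENXIO;
 *
 *      ret = devm_request_irq(dev, irq, my_tx_irq_handler, IRQF_TRIGGER_HIGH,
 *                             dev_name(dev), priv);
 */
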
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
        int ret;

        memset(&req, 0, sizeof(req));

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = rx_chn->udma_rchan_id;
        req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
        /*
         * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with the current
         * sysfw and udmax impl, so just configure it to an invalid value.
         * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
         */
        req.rxcq_qnum = 0xFFFF;
        if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
                /* Default flow + extra ones */
                req.flowid_start = rx_chn->flow_id_base;
                req.flowid_cnt = rx_chn->flow_num;
        }
        req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        req.rx_atype = rx_chn->common.atype;

        ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
        if (ret)
                dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
                        rx_chn->udma_rchan_id, ret);

        return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                                         u32 flow_num)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        if (IS_ERR_OR_NULL(flow->udma_rflow))
                return;

        if (flow->ringrxfdq)
                k3_ringacc_ring_free(flow->ringrxfdq);

        if (flow->ringrx)
                k3_ringacc_ring_free(flow->ringrx);

        xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
        flow->udma_rflow = NULL;
        rx_chn->flows_ready--;
}

static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_idx,
                                    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int rx_ring_id;
        int rx_ringfdq_id;
        int ret = 0;

        flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
                                           flow->udma_rflow_id);
        if (IS_ERR(flow->udma_rflow)) {
                ret = PTR_ERR(flow->udma_rflow);
                dev_err(dev, "UDMAX rflow get err %d\n", ret);
                return ret;
        }

        if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
                ret = -ENODEV;
                goto err_rflow_put;
        }

        /* request and cfg rings */
        ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
                                            flow_cfg->ring_rxfdq0_id,
                                            flow_cfg->ring_rxq_id,
                                            &flow->ringrxfdq,
                                            &flow->ringrx);
        if (ret) {
                dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
                goto err_rflow_put;
        }

        ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrx %d\n", ret);
                goto err_ringrxfdq_free;
        }

        ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
                goto err_ringrxfdq_free;
        }

        if (rx_chn->remote) {
                rx_ring_id = TI_SCI_RESOURCE_NULL;
                rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
        } else {
                rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
                rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
        }

        memset(&req, 0, sizeof(req));

        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        if (rx_chn->common.epib)
                req.rx_einfo_present = 1;
        if (rx_chn->common.psdata_size)
                req.rx_psinfo_present = 1;
        if (flow_cfg->rx_error_handling)
                req.rx_error_handling = 1;
        req.rx_desc_type = 0;
        req.rx_dest_qnum = rx_ring_id;
        req.rx_src_tag_hi_sel = 0;
        req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
        req.rx_dest_tag_hi_sel = 0;
        req.rx_dest_tag_lo_sel = 0;
        req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
        req.rx_fdq1_qnum = rx_ringfdq_id;
        req.rx_fdq2_qnum = rx_ringfdq_id;
        req.rx_fdq3_qnum = rx_ringfdq_id;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
                        ret);
                goto err_ringrxfdq_free;
        }

        rx_chn->flows_ready++;
        dev_dbg(dev, "flow%d config done. ready:%d\n",
                flow->udma_rflow_id, rx_chn->flows_ready);

        return 0;

err_ringrxfdq_free:
        k3_ringacc_ring_free(flow->ringrxfdq);
        k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
        xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
        flow->udma_rflow = NULL;

        return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "dump_rx_chn:\n"
                "udma_rchan_id: %d\n"
                "src_thread: %08x\n"
                "dst_thread: %08x\n"
                "epib: %d\n"
                "hdesc_size: %u\n"
                "psdata_size: %u\n"
                "swdata_size: %u\n"
                "flow_id_base: %d\n"
                "flow_num: %d\n",
                chn->udma_rchan_id,
                chn->common.src_thread,
                chn->common.dst_thread,
                chn->common.epib,
                chn->common.hdesc_size,
                chn->common.psdata_size,
                chn->common.swdata_size,
                chn->flow_id_base,
                chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
                                        char *mark)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "=== dump ===> %s\n", mark);

        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
                xudma_rchanrt_read(chn->udma_rchanx,
                                   UDMA_CHAN_RT_PEER_RT_EN_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
                               struct k3_udma_glue_rx_channel_cfg *cfg)
{
        int ret;

        /* default rflow */
        if (cfg->flow_id_use_rxchan_id)
                return 0;

        /* not GP rflows */
        if (rx_chn->flow_id_base != -1 &&
            !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
                return 0;

        /* Allocate range of GP rflows */
        ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
                                         rx_chn->flow_id_base,
                                         rx_chn->flow_num);
        if (ret < 0) {
                dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
                        rx_chn->flow_id_base, rx_chn->flow_num, ret);
                return ret;
        }
        rx_chn->flow_id_base = ret;

        return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
                                 struct k3_udma_glue_rx_channel_cfg *cfg)
{
        struct k3_udma_glue_rx_channel *rx_chn;
        int ret, i;

        if (cfg->flow_id_num <= 0)
                return ERR_PTR(-EINVAL);

        if (cfg->flow_id_num != 1 &&
            (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
                return ERR_PTR(-EINVAL);

        rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
        if (!rx_chn)
                return ERR_PTR(-ENOMEM);

        rx_chn->common.dev = dev;
        rx_chn->common.swdata_size = cfg->swdata_size;
        rx_chn->remote = false;

        /* parse the udmap channel from DT */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &rx_chn->common, false);
        if (ret)
                goto err;

        rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
                                                rx_chn->common.psdata_size,
                                                rx_chn->common.swdata_size);

        /* request and cfg UDMAP RX channel */
        rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
        if (IS_ERR(rx_chn->udma_rchanx)) {
                ret = PTR_ERR(rx_chn->udma_rchanx);
                dev_err(dev, "UDMAX rchanx get err %d\n", ret);
                goto err;
        }
        rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

        rx_chn->flow_num = cfg->flow_id_num;
        rx_chn->flow_id_base = cfg->flow_id_base;

        /* Use RX channel id as flow id: target dev can't generate flow_id */
        if (cfg->flow_id_use_rxchan_id)
                rx_chn->flow_id_base = rx_chn->udma_rchan_id;

        rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
                                     sizeof(*rx_chn->flows), GFP_KERNEL);
        if (!rx_chn->flows) {
                ret = -ENOMEM;
                goto err;
        }

        ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
        if (ret)
                goto err;

        for (i = 0; i < rx_chn->flow_num; i++)
                rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

        /* request and cfg psi-l */
        rx_chn->common.dst_thread =
                        xudma_dev_get_psil_base(rx_chn->common.udmax) +
                        rx_chn->udma_rchan_id;

        ret = k3_udma_glue_cfg_rx_chn(rx_chn);
        if (ret) {
                dev_err(dev, "Failed to cfg rchan %d\n", ret);
                goto err;
        }

        /* init default RX flow only if flow_num = 1 */
        if (cfg->def_flow_cfg) {
                ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
                if (ret)
                        goto err;
        }

        ret = xudma_navss_psil_pair(rx_chn->common.udmax,
                                    rx_chn->common.src_thread,
                                    rx_chn->common.dst_thread);
        if (ret) {
                dev_err(dev, "PSI-L request err %d\n", ret);
                goto err;
        }

        rx_chn->psil_paired = true;

        /* reset RX RT registers */
        k3_udma_glue_disable_rx_chn(rx_chn);

        k3_udma_glue_dump_rx_chn(rx_chn);

        return rx_chn;

err:
        k3_udma_glue_release_rx_chn(rx_chn);
        return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
                                   struct k3_udma_glue_rx_channel_cfg *cfg)
{
        struct k3_udma_glue_rx_channel *rx_chn;
        int ret, i;

        if (cfg->flow_id_num <= 0 ||
            cfg->flow_id_use_rxchan_id ||
            cfg->def_flow_cfg ||
            cfg->flow_id_base < 0)
                return ERR_PTR(-EINVAL);

        /*
         * A remote RX channel is under control of a remote CPU core, so
         * Linux can only request it and manipulate it via dedicated RX flows
         */

        rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
        if (!rx_chn)
                return ERR_PTR(-ENOMEM);

        rx_chn->common.dev = dev;
        rx_chn->common.swdata_size = cfg->swdata_size;
        rx_chn->remote = true;
        rx_chn->udma_rchan_id = -1;
        rx_chn->flow_num = cfg->flow_id_num;
        rx_chn->flow_id_base = cfg->flow_id_base;
        rx_chn->psil_paired = false;

        /* parse the udmap channel from DT */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &rx_chn->common, false);
        if (ret)
                goto err;

        rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
                                                rx_chn->common.psdata_size,
                                                rx_chn->common.swdata_size);

        rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
                                     sizeof(*rx_chn->flows), GFP_KERNEL);
        if (!rx_chn->flows) {
                ret = -ENOMEM;
                goto err;
        }

        ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
        if (ret)
                goto err;

        for (i = 0; i < rx_chn->flow_num; i++)
                rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

        k3_udma_glue_dump_rx_chn(rx_chn);

        return rx_chn;

err:
        k3_udma_glue_release_rx_chn(rx_chn);
        return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
                            struct k3_udma_glue_rx_channel_cfg *cfg)
{
        if (cfg->remote)
                return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
        else
                return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);

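/*
 * Minimal usage sketch for k3_udma_glue_request_rx_chn() above with a single
 * default flow (illustrative only; the "rx0" name, ring sizes and swdata
 * size are hypothetical):
 *
 *      struct k3_udma_glue_rx_channel_cfg cfg = { };
 *      struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
 *      struct k3_udma_glue_rx_channel *rx_chn;
 *
 *      cfg.swdata_size = 16;
 *      cfg.flow_id_base = -1;          // let the glue allocate GP rflows
 *      cfg.flow_id_num = 1;
 *      cfg.flow_id_use_rxchan_id = false;
 *
 *      flow_cfg.rx_cfg.size = 128;
 *      flow_cfg.rxfdq_cfg.size = 128;
 *      flow_cfg.ring_rxq_id = -1;      // any free ring
 *      flow_cfg.ring_rxfdq0_id = -1;
 *      flow_cfg.rx_error_handling = false;
 *      flow_cfg.src_tag_lo_sel = 0;
 *      cfg.def_flow_cfg = &flow_cfg;
 *
 *      rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
 *      if (IS_ERR(rx_chn))
 *              return PTR_ERR(rx_chn);
 */
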
void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        int i;

        if (IS_ERR_OR_NULL(rx_chn->common.udmax))
                return;

        if (rx_chn->psil_paired) {
                xudma_navss_psil_unpair(rx_chn->common.udmax,
                                        rx_chn->common.src_thread,
                                        rx_chn->common.dst_thread);
                rx_chn->psil_paired = false;
        }

        for (i = 0; i < rx_chn->flow_num; i++)
                k3_udma_glue_release_rx_flow(rx_chn, i);

        if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
                xudma_free_gp_rflow_range(rx_chn->common.udmax,
                                          rx_chn->flow_id_base,
                                          rx_chn->flow_num);

        if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
                xudma_rchan_put(rx_chn->common.udmax,
                                rx_chn->udma_rchanx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
                              u32 flow_idx,
                              struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
        if (flow_idx >= rx_chn->flow_num)
                return -EINVAL;

        return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow;

        if (flow_idx >= rx_chn->flow_num)
                return -EINVAL;

        flow = &rx_chn->flows[flow_idx];

        return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
        return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
                                u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int rx_ring_id;
        int rx_ringfdq_id;
        int ret = 0;

        if (!rx_chn->remote)
                return -EINVAL;

        rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
        rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

        memset(&req, 0, sizeof(req));

        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        req.rx_dest_qnum = rx_ring_id;
        req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
        req.rx_fdq1_qnum = rx_ringfdq_id;
        req.rx_fdq2_qnum = rx_ringfdq_id;
        req.rx_fdq3_qnum = rx_ringfdq_id;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
                        ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
                                 u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int ret = 0;

        if (!rx_chn->remote)
                return -EINVAL;

        memset(&req, 0, sizeof(req));
        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
                        ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        if (rx_chn->remote)
                return -EINVAL;

        if (rx_chn->flows_ready < rx_chn->flow_num)
                return -EINVAL;

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN);

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE);

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
        return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

        xudma_rchanrt_write(rx_chn->udma_rchanx,
                            UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                               bool sync)
{
        int i = 0;
        u32 val;

        if (rx_chn->remote)
                return;

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

        val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = xudma_rchanrt_read(rx_chn->udma_rchanx,
                                         UDMA_CHAN_RT_CTL_REG);
                udelay(1);
                if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
                        dev_err(rx_chn->common.dev, "RX tdown timeout\n");
                        break;
                }
                i++;
        }

        val = xudma_rchanrt_read(rx_chn->udma_rchanx,
                                 UDMA_CHAN_RT_PEER_RT_EN_REG);
        if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
                dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                u32 flow_num, void *data,
                void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
        struct device *dev = rx_chn->common.dev;
        dma_addr_t desc_dma;
        int occ_rx, i, ret;

        /* reset RXCQ as it is not an input for udma - expected to be empty */
        occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
        dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
        if (flow->ringrx)
                k3_ringacc_ring_reset(flow->ringrx);

        /* Skip RX FDQ in case one FDQ is used for the set of flows */
        if (skip_fdq)
                return;

        /*
         * RX FDQ reset needs to be done in a special way as it is an input
         * for udma and its state is cached by udma, so:
         * 1) save RX FDQ occ
         * 2) clean up RX FDQ and call the .cleanup() callback for each desc
         * 3) reset RX FDQ in a special way
         */
        occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
        dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

        for (i = 0; i < occ_rx; i++) {
                ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
                if (ret) {
                        dev_err(dev, "RX reset pop %d\n", ret);
                        break;
                }
                cleanup(data, desc_dma);
        }

        k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                             u32 flow_num, struct cppi5_host_desc_t *desc_rx,
                             dma_addr_t desc_dma)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                            u32 flow_num, dma_addr_t *desc_dma)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
                            u32 flow_num)
{
        struct k3_udma_glue_rx_flow *flow;

        flow = &rx_chn->flows[flow_num];

        flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);

        return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
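
/*
 * Typical RX datapath for flow 0 built on the helpers above (a sketch; the
 * descriptor/buffer management and the desc/desc_dma/buf_dma/size names are
 * the client's own). Refill the free descriptor queue per buffer:
 *
 *      cppi5_hdesc_init(desc, 0, 0);
 *      cppi5_hdesc_attach_buf(desc, buf_dma, size, buf_dma, size);
 *      k3_udma_glue_push_rx_chn(rx_chn, 0, desc, desc_dma);
 *
 * and in the completion handler for the virq returned by
 * k3_udma_glue_rx_get_irq(rx_chn, 0):
 *
 *      while (!k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma)) {
 *              // process the packet described at desc_dma, then push a
 *              // fresh descriptor back to the free descriptor queue
 *      }
 */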