// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
	struct device *dev;
	struct device chan_dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32  hdesc_size;
	bool epib;
	u32  psdata_size;
	u32  swdata_size;
	u32  atype_asel;
	struct psil_endpoint_config *ep_config;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;

	int udma_tflow_id;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	int flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

static void k3_udma_chan_dev_release(struct device *dev)
{
	/* The struct containing the device is devm managed */
}

static struct class k3_udma_glue_devclass = {
	.name = "k3_udma_glue_chan",
	.dev_release = k3_udma_chan_dev_release,
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->ringacc = xudma_get_ringacc(common->udmax);
	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
					    bool tx_chn)
{
	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
		return -EINVAL;

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
		return -EINVAL;

	/* get psil endpoint config */
	common->ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(common->ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		return PTR_ERR(common->ep_config);
	}

	common->epib = common->ep_config->needs_epib;
	common->psdata_size = common->ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
				     const char *name,
				     struct k3_udma_glue_common *common,
				     bool tx_chn)
{
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);
	if (ret)
		goto out_put_spec;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel asel: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}

		common->atype_asel = dma_spec.args[1];
	}

	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}

static int
of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
				bool tx_chn, u32 thread_id)
{
	int ret = 0;

	if (unlikely(!udmax_np))
		return -EINVAL;

	ret = of_k3_udma_glue_parse(udmax_np, common);
	if (ret)
		goto out_put_spec;

	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);

out_put_spec:
	of_node_put(udmax_np);
	return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype_asel;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

static int
k3_udma_glue_request_tx_chn_common(struct device *dev,
				   struct k3_udma_glue_tx_channel *tx_chn,
				   struct k3_udma_glue_tx_channel_cfg *cfg)
{
	int ret;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
							  tx_chn->common.psdata_size,
							  tx_chn->common.swdata_size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
	else
		tx_chn->udma_tchan_id = -1;

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
					      tx_chn->udma_tchan_id);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		return ret;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
	ret = device_register(&tx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
		return ret;
	}

	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		tx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
	else
		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tflow_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		return ret;
	}

	/* Set the dma_dev for the rings to be configured */
	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		return ret;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		return ret;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		return ret;
	}

	k3_udma_glue_dump_tx_chn(tx_chn);

	return 0;
}

struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse the udmap channel from DT */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
	if (ret)
		goto err;

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
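
/*
 * Usage sketch (illustrative only, not part of this driver): a client such
 * as a networking driver would typically request its TX channel at probe
 * time. The "tx0" dma-name, ring sizes and swdata size below are
 * hypothetical values chosen for the example.
 *
 *	struct k3_udma_glue_tx_channel_cfg tx_cfg = { };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	tx_cfg.swdata_size = sizeof(void *);
 *	tx_cfg.tx_cfg.size = 1024;
 *	tx_cfg.tx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
 *	tx_cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *	tx_cfg.txcq_cfg.size = 1024;
 *	tx_cfg.txcq_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
 *	tx_cfg.txcq_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &tx_cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 */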

struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
					  struct k3_udma_glue_tx_channel_cfg *cfg,
					  struct device_node *udmax_np, u32 thread_id)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
	if (ret)
		goto err;

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);

	if (tx_chn->common.chan_dev.parent) {
		device_unregister(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
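
/*
 * Usage sketch (illustrative, with hypothetical names): submitting one
 * packet and reaping completions. The descriptor pool, the DMA mapping of
 * desc/buf and the foo_* helpers are assumptions for the example, not part
 * of this API.
 *
 *	cppi5_hdesc_init(desc, 0, psdata_size);
 *	cppi5_hdesc_attach_buf(desc, buf_dma, pkt_len, buf_dma, pkt_len);
 *	cppi5_hdesc_set_pktlen(desc, pkt_len);
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
 *
 * and, on the completion interrupt:
 *
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
 *		foo_free_tx_desc(priv, desc_dma);
 */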

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	int ret;

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	tx_chn->psil_paired = true;

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");

	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	struct device *dev = tx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/*
	 * The TXQ is an input to UDMA and its state is cached by UDMA, so it
	 * has to be reset in a special way:
	 * 1) save the TXQ occupancy
	 * 2) drain the TXQ and call the .cleanup() callback for each desc
	 * 3) reset the TXQ using the saved occupancy
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	/* reset TXCQ as it is not an input to UDMA - expected to be empty */
	k3_ringacc_ring_reset(tx_chn->ringtxcq);
	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
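
/*
 * Typical TX shutdown order for a client (a sketch; foo_tx_cleanup() is a
 * hypothetical callback that unmaps and frees one descriptor):
 *
 *	k3_udma_glue_tdown_tx_chn(tx_chn, true);
 *	k3_udma_glue_reset_tx_chn(tx_chn, priv, foo_tx_cleanup);
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 *	k3_udma_glue_release_tx_chn(tx_chn);
 */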

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
							  tx_chn->udma_tflow_id);
	} else {
		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
	}

	if (!tx_chn->virq)
		return -ENXIO;

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
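
/*
 * A client would typically wire the returned Linux IRQ to its completion
 * handler (sketch; foo_tx_irq_handler and the "foo-tx" name are
 * hypothetical):
 *
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	if (irq < 0)
 *		return irq;
 *	ret = devm_request_irq(dev, irq, foo_tx_irq_handler,
 *			       IRQF_TRIGGER_HIGH, "foo-tx", priv);
 */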

struct device *
k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax) &&
	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
		return &tx_chn->common.chan_dev;

	return xudma_get_device(tx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);

void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);

void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
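
/*
 * On PKTDMA with a non-zero ASEL, addresses written into and read from
 * CPPI5 descriptors carry the ASEL value in their upper bits, so a client
 * converts in both directions (sketch; foo_desc_from_dma() is a
 * hypothetical pool lookup):
 *
 *	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &buf_dma);
 *	cppi5_hdesc_attach_buf(desc, buf_dma, pkt_len, buf_dma, pkt_len);
 *
 * and after popping a completed descriptor address:
 *
 *	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn, &desc_dma);
 *	desc = foo_desc_from_dma(pool, desc_dma);
 */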

static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
	 * and udmax impl, so just configure it to an invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype_asel;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}

static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		rx_ringfdq_id = flow->udma_rflow_id +
				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
		rx_ring_id = 0;
	} else {
		rx_ring_id = flow_cfg->ring_rxq_id;
		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    rx_ringfdq_id, rx_ring_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	/* Set the dma_dev for the rings to be configured */
	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}

	rx_chn->flow_id_base = ret;

	return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	struct psil_endpoint_config *ep_cfg;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse the udmap channel from DT */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
							  rx_chn->common.psdata_size,
							  rx_chn->common.swdata_size);

	ep_cfg = rx_chn->common.ep_config;

	if (xudma_is_pktdma(rx_chn->common.udmax))
		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
	else
		rx_chn->udma_rchan_id = -1;

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
					      rx_chn->udma_rchan_id);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		int flow_start = cfg->flow_id_base;
		int flow_end;

		if (flow_start == -1)
			flow_start = ep_cfg->flow_start;

		flow_end = flow_start + cfg->flow_id_num - 1;
		if (flow_start < ep_cfg->flow_start ||
		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
			dev_err(dev, "Invalid flow range requested\n");
			ret = -EINVAL;
			goto err;
		}
		rx_chn->flow_id_base = flow_start;
	} else {
		rx_chn->flow_id_base = cfg->flow_id_base;

		/* Use RX channel id as flow id: target dev can't generate flow_id */
		if (cfg->flow_id_use_rxchan_id)
			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
	}

	rx_chn->flow_num = cfg->flow_id_num;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

static int
k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
					  struct k3_udma_glue_rx_channel_cfg *cfg,
					  struct device *dev)
{
	int ret, i;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
							  rx_chn->common.psdata_size,
							  rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows)
		return -ENOMEM;

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
		     rx_chn->common.src_thread, rx_chn->flow_id_base);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		return ret;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		return ret;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * A remote RX channel is under the control of a remote CPU core, so
	 * Linux can only request it and manipulate it through its dedicated
	 * RX flows.
	 */
	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse the udmap channel from DT */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
	if (ret)
		goto err;

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
						 struct k3_udma_glue_rx_channel_cfg *cfg,
						 struct device_node *udmax_np, u32 thread_id)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * A remote RX channel is under the control of a remote CPU core, so
	 * Linux can only request it and manipulate it through its dedicated
	 * RX flows.
	 */
	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
	if (ret)
		goto err;

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);

	return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
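
/*
 * Usage sketch (illustrative): requesting a local RX channel with a single
 * default flow. The "rx0" dma-name and sizes are hypothetical; flow_id_base
 * of -1 lets the glue layer pick the flow range itself.
 *
 *	struct k3_udma_glue_rx_channel_cfg rx_cfg = { };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
 *	struct k3_udma_glue_rx_channel *rx_chn;
 *
 *	flow_cfg.rx_cfg.size = 1024;
 *	flow_cfg.rx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
 *	flow_cfg.rx_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *	flow_cfg.rxfdq_cfg = flow_cfg.rx_cfg;
 *	flow_cfg.ring_rxq_id = -1;
 *	flow_cfg.ring_rxfdq0_id = -1;
 *	flow_cfg.rx_error_handling = false;
 *	flow_cfg.src_tag_lo_sel = 0;
 *
 *	rx_cfg.swdata_size = sizeof(void *);
 *	rx_cfg.flow_id_base = -1;
 *	rx_cfg.flow_id_num = 1;
 *	rx_cfg.def_flow_cfg = &flow_cfg;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0", &rx_cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 */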

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);

	if (rx_chn->common.chan_dev.parent) {
		device_unregister(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int ret;

	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	rx_chn->psil_paired = true;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");

	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
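
/*
 * All flows must be configured before the channel is enabled; with more
 * than one flow a client initializes each one first (sketch; flow_cfg[] is
 * a hypothetical client-owned array of flow configurations):
 *
 *	for (i = 0; i < flow_num; i++) {
 *		ret = k3_udma_glue_rx_flow_init(rx_chn, i, &flow_cfg[i]);
 *		if (ret)
 *			goto err;
 *	}
 *	ret = k3_udma_glue_enable_rx_chn(rx_chn);
 */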

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       u32 flow_num, void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not an input to UDMA - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (skip_fdq)
		goto do_reset;

	/*
	 * The RX FDQ is an input to UDMA and its state is cached by UDMA, so
	 * it has to be reset in a special way:
	 * 1) save the RX FDQ occupancy
	 * 2) drain the RX FDQ and call the .cleanup() callback for each desc
	 * 3) reset the RX FDQ using the saved occupancy
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

do_reset:
	k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
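
/*
 * A typical RX shutdown sequence (sketch; foo_rx_cleanup() is a
 * hypothetical callback that returns one descriptor to the client's pool,
 * and skip_fdq is set for all but the first flow when they share one FDQ):
 *
 *	k3_udma_glue_tdown_rx_chn(rx_chn, true);
 *	for (i = 0; i < flow_num; i++)
 *		k3_udma_glue_reset_rx_chn(rx_chn, i, priv, foo_rx_cleanup,
 *					  !!i);
 *	k3_udma_glue_disable_rx_chn(rx_chn);
 *	k3_udma_glue_release_rx_chn(rx_chn);
 */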

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
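
/*
 * RX data path sketch: the client keeps the free descriptor queue topped
 * up and pops completed packets from the RX ring (foo_* names and the EPIB
 * flag choice are assumptions for the example):
 *
 * refill:
 *	cppi5_hdesc_init(desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, psdata_size);
 *	cppi5_hdesc_attach_buf(desc, buf_dma, buf_len, buf_dma, buf_len);
 *	ret = k3_udma_glue_push_rx_chn(rx_chn, flow_id, desc, desc_dma);
 *
 * receive (e.g. from a NAPI poll loop):
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, flow_id, &desc_dma))
 *		foo_process_packet(priv, desc_dma);
 */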

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
							flow->udma_rflow_id);
	} else {
		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
	}

	if (!flow->virq)
		return -ENXIO;

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
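
/*
 * Like the TX side, each RX flow has its own Linux IRQ (sketch;
 * foo_rx_irq_handler and the "foo-rx" name are hypothetical):
 *
 *	irq = k3_udma_glue_rx_get_irq(rx_chn, flow_id);
 *	if (irq < 0)
 *		return irq;
 *	ret = devm_request_irq(dev, irq, foo_rx_irq_handler,
 *			       IRQF_TRIGGER_HIGH, "foo-rx", priv);
 */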

struct device *
k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
{
	if (xudma_is_pktdma(rx_chn->common.udmax) &&
	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
		return &rx_chn->common.chan_dev;

	return xudma_get_device(rx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);

void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);

static int __init k3_udma_glue_class_init(void)
{
	return class_register(&k3_udma_glue_devclass);
}

module_init(k3_udma_glue_class_init);
MODULE_LICENSE("GPL v2");