// SPDX-License-Identifier: GPL-2.0
/*
 *  K3 NAVSS DMA glue interface
 *
 *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
	struct device *dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32  hdesc_size;
	bool epib;
	u32  psdata_size;
	u32  swdata_size;
	u32  atype;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32  swdata_size;
	int  flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
						       "ti,ringacc");
	if (IS_ERR(common->ringacc))
		return PTR_ERR(common->ringacc);

	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
		const char *name, struct k3_udma_glue_common *common,
		bool tx_chn)
{
	struct psil_endpoint_config *ep_config;
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		common->atype = dma_spec.args[1];
	}

	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	/* get psil endpoint config */
	ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		ret = PTR_ERR(ep_config);
		goto out_put_spec;
	}

	common->epib = ep_config->needs_epib;
	common->psdata_size = ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}

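/*
 * Channels are looked up by name through the "dmas"/"dma-names" consumer
 * properties parsed above: args[0] carries the PSI-L thread ID (TX threads
 * have K3_PSIL_DST_THREAD_ID_OFFSET set) and the optional args[1] carries
 * the ATYPE. An illustrative consumer node (sketch only; phandles, names
 * and thread IDs are SoC/board specific assumptions, not taken from this
 * file) could look like:
 *
 *	ethernet {
 *		dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>;
 *		dma-names = "tx0", "rx";
 *	};
 */
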
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

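/*
 * k3_udma_glue_request_tx_chn - allocate and configure a TX channel
 * @dev: client device; its of_node provides the "dmas"/"dma-names" lookup
 * @name: channel name from "dma-names"
 * @cfg: TX channel configuration (swdata size, TX/TXCQ ring configs, flags)
 *
 * Requests a UDMAP TX channel and its TX/TXCQ ring pair, sizes the CPPI5
 * host descriptor from the PSI-L endpoint config and configures the channel
 * through TI-SCI. Returns the channel handle or an ERR_PTR() on failure.
 */
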
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		goto err;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tchan_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		goto err;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		goto err;
	}

	k3_udma_glue_dump_tx_chn(tx_chn);

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);

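/*
 * Typical TX usage, as an illustrative sketch only. Ring sizes, the "tx0"
 * name and the error handling shown here are assumptions made for the
 * example, not requirements of this API:
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { 0 };
 *
 *	cfg.swdata_size = 16;
 *	cfg.tx_cfg.size = 128;
 *	cfg.txcq_cfg.size = 128;
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	ret = k3_udma_glue_enable_tx_chn(tx_chn);
 *	...
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
 *	... completion interrupt fires ...
 *	ret = k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma);
 */
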
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

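/*
 * TX descriptor submission/completion. free_pkts mirrors the TXCQ ring
 * size: push_tx_chn() consumes one credit per pushed descriptor and fails
 * when the completion ring would overflow; pop_tx_chn() returns the credit
 * once the completed descriptor has been popped from TXCQ.
 */
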
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	int ret;

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	tx_chn->psil_paired = true;

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

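/*
 * k3_udma_glue_reset_tx_chn() drains both rings after teardown and hands
 * every descriptor still queued on ringtx back to the caller through the
 * @cleanup callback. A minimal callback sketch, assuming a hypothetical
 * client that can map a DMA address back to its own descriptor bookkeeping
 * (all "foo" names are illustrative only):
 *
 *	static void foo_tx_cleanup(void *data, dma_addr_t desc_dma)
 *	{
 *		struct foo_priv *priv = data;
 *		struct cppi5_host_desc_t *desc;
 *
 *		desc = foo_desc_from_dma(priv, desc_dma);
 *		foo_unmap_and_free(priv, desc);
 *	}
 */
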
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/* reset TXCQ as it is not input for udma - expected to be empty */
	if (tx_chn->ringtxcq)
		k3_ringacc_ring_reset(tx_chn->ringtxcq);

	/*
	 * TXQ reset needs to be done in a special way as it is input for
	 * udma and its state is cached by udma, so:
	 * 1) save TXQ occ
	 * 2) clean up TXQ and call callback .cleanup() for each desc
	 * 3) reset TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

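/*
 * RX side: one UDMAP RX channel plus one or more RX flows. Each flow owns
 * a completion ring (ringrx) and a free descriptor queue ring (ringrxfdq);
 * the flows steer received packets, while the channel itself is only
 * configured with the flow id range below.
 */
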
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
	 * and udmax impl, so just configure it to invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}

static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    flow_cfg->ring_rxfdq0_id,
					    flow_cfg->ring_rxq_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not a GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}

	rx_chn->flow_id_base = ret;

	return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;

	/* Use RX channel id as flow id: target dev can't generate flow_id */
	if (cfg->flow_id_use_rxchan_id)
		rx_chn->flow_id_base = rx_chn->udma_rchan_id;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * Remote RX channel is under control of a remote CPU core, so
	 * Linux can only request and manipulate it by its dedicated RX flows
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);

	return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);

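/*
 * Typical RX usage for a local (non-remote) channel with a single default
 * flow, as an illustrative sketch only; the ring sizes and the "rx" name
 * are assumptions made for the example:
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
 *
 *	flow_cfg.rx_cfg.size = 128;
 *	flow_cfg.rxfdq_cfg.size = 128;
 *	cfg.swdata_size = 16;
 *	cfg.flow_id_num = 1;
 *	cfg.flow_id_base = -1;
 *	cfg.def_flow_cfg = &flow_cfg;
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 *	irq = k3_udma_glue_rx_get_irq(rx_chn, 0);
 *	... push free descriptors with k3_udma_glue_push_rx_chn() ...
 *	ret = k3_udma_glue_enable_rx_chn(rx_chn);
 */
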
void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

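/*
 * Flow enable/disable below are meant for remote RX channels, where Linux
 * owns the flows but not the channel: enable points the flow at its local
 * rings again, while disable parks all destination queues on
 * TI_SCI_RESOURCE_NULL so the remote side stops landing packets here.
 */
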
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int ret;

	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	rx_chn->psil_paired = true;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
		u32 flow_num, void *data,
		void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not input for udma - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
	if (flow->ringrx)
		k3_ringacc_ring_reset(flow->ringrx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (skip_fdq)
		return;

	/*
	 * RX FDQ reset needs to be done in a special way as it is input for
	 * udma and its state is cached by udma, so:
	 * 1) save RX FDQ occ
	 * 2) clean up RX FDQ and call callback .cleanup() for each desc
	 * 3) reset RX FDQ in a special way
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);

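/*
 * Per-flow datapath helpers: push_rx_chn() queues a free buffer descriptor
 * on the flow's FDQ ring, pop_rx_chn() pops a completed descriptor from the
 * flow's RX completion ring, and rx_get_irq() returns the completion ring
 * interrupt for the flow.
 */
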
int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);