// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384

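/*
 * Example mode word (illustrative, not from the original source): a
 * single-fragment 64-byte TX packet is queued with hw_mode set to
 * CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP | 64, i.e. bits
 * 31:29 set and the length in the low bits. The hardware clears OWNER
 * when it completes the descriptor and sets EOQ on the last descriptor
 * of a chain.
 */
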
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	u32			sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			num_rx_desc; /* RX descriptors number */
	int			num_tx_desc; /* TX descriptors number */
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
	int				weight;
	u32				rate_factor;
	u32				rate;
};

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,	    ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,	    ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,	    ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,	    ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,	    ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,	    ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)

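/*
 * Illustrative mapping (assuming CPDMA_MAX_CHANNELS is 8, as defined in
 * davinci_cpdma.h): tx_chan_num(0..7) -> 0..7 and rx_chan_num(0..7) ->
 * 8..15, so TX and RX channels can share the single channels[] array
 * without colliding, while __chan_linear() masks the RX offset back off
 * so that both tx channel 3 and rx channel 3 map to hardware channel 3.
 */
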
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		readl((chan)->fld)
#define desc_read(desc, fld)		readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

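/*
 * Example (illustrative): a TX packet directed to slave port 1 gets
 * CPDMA_DESC_TO_PORT_EN | (1 << CPDMA_TO_PORT_SHIFT) ORed into its mode
 * word (bits 20 and 16); RX channels and non-directed (directed == 0)
 * submissions leave the mode word unchanged.
 */
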
static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
	struct cpdma_desc_pool *pool = ctlr->pool;

	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
				  pool->hw_addr);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
	struct cpdma_params *cpdma_params = &ctlr->params;
	struct cpdma_desc_pool *pool;
	int ret = -ENOMEM;

	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;
	ctlr->pool = pool;

	pool->mem_size = cpdma_params->desc_mem_size;
	pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
				cpdma_params->desc_align);
	pool->num_desc = pool->mem_size / pool->desc_size;

	if (cpdma_params->descs_pool_size) {
		/* recalculate the memory size required for the descriptor
		 * pool based on the number of descriptors specified by the
		 * user; if that size exceeds the CPPI internal RAM size
		 * (desc_mem_size), switch to DDR
		 */
		pool->num_desc = cpdma_params->descs_pool_size;
		pool->mem_size = pool->desc_size * pool->num_desc;
		if (pool->mem_size > cpdma_params->desc_mem_size)
			cpdma_params->desc_mem_phys = 0;
	}

	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
					      -1, "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		ret = PTR_ERR(pool->gen_pool);
		dev_err(ctlr->dev, "pool create failed %d\n", ret);
		goto gen_pool_create_fail;
	}

	if (cpdma_params->desc_mem_phys) {
		pool->phys = cpdma_params->desc_mem_phys;
		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
					   pool->mem_size);
		pool->hw_addr = cpdma_params->desc_hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
						  &pool->hw_addr, GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(ctlr->dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return 0;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
	ctlr->pool = NULL;
	return ret;
}

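/*
 * Sizing sketch (hypothetical numbers): with desc_align = 16,
 * desc_size = ALIGN(sizeof(struct cpdma_desc), 16) = 32 bytes, so an
 * 8 KiB CPPI RAM (desc_mem_size = 0x2000) yields num_desc = 256. If a
 * user then requests descs_pool_size = 512, mem_size grows to 16 KiB,
 * exceeds the on-chip RAM, and desc_mem_phys is zeroed so the
 * dma_alloc_coherent() path above places the pool in DDR instead.
 */
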
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info = &controls[control];
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	struct cpdma_control_info *info = &controls[control];
	int ret;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_RO) != ACCESS_RO)
		return -EPERM;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
	return ret;
}

/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	u32 rate_reg;
	u32 rmask;
	int ret;

	if (!chan->rate)
		return 0;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
	rmask |= chan->mask;

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	return ret;
}

static int cpdma_chan_on(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success, -EINVAL otherwise.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;
	u32 new_rmask = 0;
	int rlim = 0;
	int i;

	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan == ch)
			chan->rate = rate;

		if (chan->rate) {
			rlim = 1;
			new_rmask |= chan->mask;
			continue;
		}

		if (rlim)
			goto err;
	}

	*rmask = new_rmask;
	*prio_mode = rlim;
	return 0;

err:
	ch->rate = old_rate;
	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
		chan->chan_num);
	return -EINVAL;
}

static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
{
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 dividend, divisor;

	if (!ch->rate) {
		ch->rate_factor = 0;
		goto set_factor;
	}

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	if (!freq) {
		dev_err(ctlr->dev, "The bus frequency is not set\n");
		return -EINVAL;
	}

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		dividend = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		dividend = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

			if (!delta)
				break;
		}

		if (prev_delta >= delta) {
			prev_delta = delta;
			send_cnt++;
			continue;
		}

		idle_cnt++;
		dividend = freq * idle_cnt;
		send_cnt = DIV_ROUND_CLOSEST_ULL(dividend, ch->rate);
		send_cnt -= idle_cnt;
		prev_delta = UINT_MAX;
	}

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
	return 0;
}

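/*
 * Worked example (hypothetical numbers): with bus_freq_mhz = 125 the
 * reference freq is 125 * 1000 * 32 = 4,000,000. Requesting ch->rate =
 * 1,000,000 Kb/s gives min_send_cnt = 3,000,000 and an initial
 * send_cnt = DIV_ROUND_UP(3000000, 1000000) = 3; the first idle_cnt
 * candidate is DIV_ROUND_CLOSEST_ULL(1000000 * 3, 3000000) = 1, so
 * new_rate = 4000000 * 1 / (1 + 3) = 1,000,000 exactly, delta is 0,
 * the search stops, and rate_factor = 3 | (1 << 16).
 */
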
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	if (cpdma_desc_pool_create(ctlr))
		return NULL;

	/* split pool equally between RX/TX by default */
	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}

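/*
 * Typical bring-up, sketched against this file's API (field values are
 * platform-specific placeholders, not canonical):
 *
 *	struct cpdma_params params = {
 *		.dev		 = dev,
 *		.dmaregs	 = dma_base,
 *		.num_chan	 = 8,
 *		.desc_mem_size	 = SZ_8K,
 *		.desc_align	 = 16,
 *		.has_soft_reset	 = true,
 *		.min_packet_size = 64,
 *		.bus_freq_mhz	 = 125,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
 *
 *	if (!dma)
 *		return -ENOMEM;
 *	// ... cpdma_chan_create() the channels, then cpdma_ctlr_start(dma)
 */
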
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	struct cpdma_chan *chan;
	unsigned long flags;
	int i, prio_mode;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;

			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		writel(0, ctlr->params.txhdp + 4 * i);
		writel(0, ctlr->params.rxhdp + 4 * i);
		writel(0, ctlr->params.txcp + 4 * i);
		writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	prio_mode = 0;
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan) {
			cpdma_chan_set_chan_shaper(chan);
			cpdma_chan_on(chan);

			/* fixed prio mode stays off only if all tx channels
			 * are rate limited
			 */
			if (is_tx_chan(chan) && !chan->rate)
				prio_mode = 1;
		}
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr);
	return ret;
}

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}

static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* hand any remainder to the channel with the biggest share */
	if (most_chan)
		most_chan->desc_num += desc_cnt;
}

/*
 * cpdma_chan_split_pool - Splits the controller's descriptor pool between
 * all channels. Has to be called under ctlr lock.
 */
int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = ctlr->num_tx_desc;
	rx_desc_num = ctlr->num_rx_desc;

	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}

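/*
 * Worked split (hypothetical): num_tx_desc = 128 with three TX channels,
 * one of weight 50 and two of weight 0: the weighted channel gets
 * 128 * 50 / 100 = 64 descriptors, tx_per_ch_desc = (128 - 64) / 2 = 32
 * for each free channel, and any rounding remainder is handed to the
 * channel that already holds the biggest share.
 */
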
/* cpdma_chan_set_weight - set weight of a channel in percentage.
 * Tx and Rx channels have separate weights: RX channels share 100% and
 * TX channels share a separate 100%. The weight is used to split cpdma
 * resources (including the number of descriptors) between channels in
 * the required proportion. The channel rate alone is not enough to
 * derive a weight, since the maximum rate of the interface would also
 * be needed. If weight == 0, the channel uses the descriptors left
 * unused by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
	unsigned int dividend, divisor;

	dividend = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(dividend, divisor);
}

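/*
 * Example (hypothetical bus frequency): bus_freq_mhz = 125 gives
 * dividend = 125 * 32 * 1000 = 4,000,000 and divisor = 16385, so the
 * minimum configurable rate is DIV_ROUND_UP(4000000, 16385) = 245 Kb/s;
 * requests below this cannot be represented within CPDMA_MAX_RLIM_CNT
 * counter steps.
 */
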
/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
 * The bandwidth-limited channels have to be in order beginning from the
 * lowest.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s; if 0, the shaper is turned off
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
	unsigned long flags, ch_flags;
	struct cpdma_ctlr *ctlr;
	int ret, prio_mode;
	u32 rmask;

	if (!ch || !is_tx_chan(ch))
		return -EINVAL;

	if (ch->rate == rate)
		return rate;

	ctlr = ch->ctlr;
	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
	if (ret)
		goto err;

	ret = cpdma_chan_set_factors(ctlr, ch);
	if (ret)
		goto err;

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* on shapers */
	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;

err:
	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
	unsigned long flags;
	u32 rate;

	spin_lock_irqsave(&ch->lock, flags);
	rate = ch->rate;
	spin_unlock_irqrestore(&ch->lock, flags);

	return rate;
}

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return ERR_PTR(-EINVAL);

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr = ctlr;
	chan->state = CPDMA_STATE_IDLE;
	chan->chan_num = chan_num;
	chan->handler = handler;
	chan->rate = 0;
	chan->weight = 0;

	if (is_rx_chan(chan)) {
		chan->hdp = ctlr->params.rxhdp + offset;
		chan->cp = ctlr->params.rxcp + offset;
		chan->rxfree = ctlr->params.rxfree + offset;
		chan->int_set = CPDMA_RXINTMASKSET;
		chan->int_clear = CPDMA_RXINTMASKCLEAR;
		chan->td = CPDMA_RXTEARDOWN;
		chan->dir = DMA_FROM_DEVICE;
	} else {
		chan->hdp = ctlr->params.txhdp + offset;
		chan->cp = ctlr->params.txcp + offset;
		chan->int_set = CPDMA_TXINTMASKSET;
		chan->int_clear = CPDMA_TXINTMASKCLEAR;
		chan->td = CPDMA_TXTEARDOWN;
		chan->dir = DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}

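/*
 * Usage sketch (the handler and its body are placeholders, not part of
 * this driver); a cpdma_handler_fn receives the submit token back along
 * with the completed length and status:
 *
 *	static void my_tx_done(void *token, int len, int status)
 *	{
 *		// e.g. unmap/free the buffer carried by token
 *	}
 *	...
 *	struct cpdma_chan *txch = cpdma_chan_create(dma, 0, my_tx_done, 0);
 *
 *	if (IS_ERR(txch))
 *		return PTR_ERR(txch);
 *	cpdma_chan_start(txch);
 */
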
int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	unsigned long flags;
	u32 mode;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	/* Relaxed IO accessors can be used here as there is a read barrier
	 * at the end of the write sequence.
	 */
	writel_relaxed(0, &desc->hw_next);
	writel_relaxed(buffer, &desc->hw_buffer);
	writel_relaxed(len, &desc->hw_len);
	writel_relaxed(mode | len, &desc->hw_mode);
	writel_relaxed((uintptr_t)token, &desc->sw_token);
	writel_relaxed(buffer, &desc->sw_buffer);
	writel_relaxed(len, &desc->sw_len);
	desc_read(desc, sw_len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

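/*
 * Submission sketch (names are placeholders): the token is returned
 * untouched to the channel handler on completion, so callers typically
 * pass their buffer object itself:
 *
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *	if (ret)
 *		dev_kfree_skb_any(skb);
 *
 * On completion the handler receives the skb as token together with the
 * transmitted length and a status (negative on teardown).
 */
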
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	bool free_tx_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
			gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	uintptr_t token;

	token = desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)((void *)token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = desc_read(desc, hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ret)
		return ret;

	ret = cpdma_chan_on(chan);
	if (ret)
		return ret;

	return 0;
}

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;

		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_rx_desc;
}

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_tx_desc;
}

void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
	ctlr->num_rx_desc = num_rx_desc;
	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
}