// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

#define CPDMA_TXIDVER 0x00
#define CPDMA_TXCONTROL 0x04
#define CPDMA_TXTEARDOWN 0x08
#define CPDMA_RXIDVER 0x10
#define CPDMA_RXCONTROL 0x14
#define CPDMA_SOFTRESET 0x1c
#define CPDMA_RXTEARDOWN 0x18
#define CPDMA_TX_PRI0_RATE 0x30
#define CPDMA_TXINTSTATRAW 0x80
#define CPDMA_TXINTSTATMASKED 0x84
#define CPDMA_TXINTMASKSET 0x88
#define CPDMA_TXINTMASKCLEAR 0x8c
#define CPDMA_MACINVECTOR 0x90
#define CPDMA_MACEOIVECTOR 0x94
#define CPDMA_RXINTSTATRAW 0xa0
#define CPDMA_RXINTSTATMASKED 0xa4
#define CPDMA_RXINTMASKSET 0xa8
#define CPDMA_RXINTMASKCLEAR 0xac
#define CPDMA_DMAINTSTATRAW 0xb0
#define CPDMA_DMAINTSTATMASKED 0xb4
#define CPDMA_DMAINTMASKSET 0xb8
#define CPDMA_DMAINTMASKCLEAR 0xbc
#define CPDMA_DMAINT_HOSTERR BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL 0x20
#define CPDMA_DMASTATUS 0x24
#define CPDMA_RXBUFFOFS 0x28
#define CPDMA_EM_CONTROL 0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP BIT(31)
#define CPDMA_DESC_EOP BIT(30)
#define CPDMA_DESC_OWNER BIT(29)
#define CPDMA_DESC_EOQ BIT(28)
#define CPDMA_DESC_TD_COMPLETE BIT(27)
#define CPDMA_DESC_PASS_CRC BIT(26)
#define CPDMA_DESC_TO_PORT_EN BIT(20)
#define CPDMA_TO_PORT_SHIFT 16
#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN 4

#define CPDMA_TEARDOWN_VALUE 0xfffffffc

#define CPDMA_MAX_RLIM_CNT 16384
struct cpdma_desc_pool {
	phys_addr_t phys;
	dma_addr_t hw_addr;
	void __iomem *iomap; /* ioremap map */
	void *cpumap; /* dma_alloc map */
	int desc_size, mem_size;
	int num_desc;
	struct device *dev;
	struct gen_pool *gen_pool;
};

struct cpdma_ctlr {
	enum cpdma_state state;
	struct cpdma_params params;
	struct device *dev;
	struct cpdma_desc_pool *pool;
	spinlock_t lock;
	struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
	int chan_num;
	int num_rx_desc; /* number of RX descriptors */
	int num_tx_desc; /* number of TX descriptors */
};

struct cpdma_chan {
	struct cpdma_desc __iomem *head, *tail;
	void __iomem *hdp, *cp, *rxfree;
	enum cpdma_state state;
	struct cpdma_ctlr *ctlr;
	int chan_num;
	spinlock_t lock;
	int count;
	u32 desc_num;
	u32 mask;
	cpdma_handler_fn handler;
	enum dma_data_direction dir;
	struct cpdma_chan_stats stats;
	/* offsets into dmaregs */
	int int_set, int_clear, td;
	int weight;
	u32 rate_factor, rate;
};

struct cpdma_control_info {
	u32 reg;
	u32 shift, mask;
	int access;
#define ACCESS_RO BIT(0)
#define ACCESS_WO BIT(1)
#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
};

struct submit_info {
	struct cpdma_chan *chan;
	int directed;
	void *token;
	void *data;
	int len;
};
static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM] = {CPDMA_DMACONTROL, 8, 0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
	[CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
};
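/* Worked example (values chosen for illustration only): each entry above is
 * {reg, shift, mask, access}, so CPDMA_STAT_TX_ERR_CODE occupies bits 23:20
 * of CPDMA_DMASTATUS. A hypothetical caller could read it through the public
 * wrapper declared in davinci_cpdma.h:
 *
 *	err = cpdma_control_get(ctlr, CPDMA_STAT_TX_ERR_CODE);
 *	// internally: (readl(dmaregs + CPDMA_DMASTATUS) >> 20) & 0xf
 */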
#define tx_chan_num(chan) (chan)
#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan) (!is_rx_chan(chan))
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan) __chan_linear((chan)->chan_num)
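/* Illustration only (assuming CPDMA_MAX_CHANNELS == 8 purely for the
 * arithmetic): TX channel 2 lives at index tx_chan_num(2) == 2 and RX
 * channel 2 at rx_chan_num(2) == 10 in ctlr->channels[], and
 * __chan_linear() maps either index back to hardware channel 2.
 */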
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs params.dmaregs
#define num_chan params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld) readl((chan)->fld)
#define desc_read(desc, fld) readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v) writel(v, (chan)->fld)
#define desc_write(desc, fld, v) writel((u32)(v), &(desc)->fld)
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
	struct cpdma_desc_pool *pool = ctlr->pool;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));

	dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,

/*
 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors. Some other
 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
 * abstract out these details.
 */
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
	struct cpdma_params *cpdma_params = &ctlr->params;
	struct cpdma_desc_pool *pool;

	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
		goto gen_pool_create_fail;

	pool->mem_size = cpdma_params->desc_mem_size;
	pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
				cpdma_params->desc_align);
	pool->num_desc = pool->mem_size / pool->desc_size;
	if (cpdma_params->descs_pool_size) {
		/* Recalculate the memory size required for the descriptor
		 * pool based on the number of descriptors requested by the
		 * user; if that size exceeds the CPPI internal RAM size
		 * (desc_mem_size), fall back to DDR.
		 */
		pool->num_desc = cpdma_params->descs_pool_size;
		pool->mem_size = pool->desc_size * pool->num_desc;
		if (pool->mem_size > cpdma_params->desc_mem_size)
			cpdma_params->desc_mem_phys = 0;
	}
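	/* Rough numbers, purely for illustration (the real sizes come from
	 * cpdma_params): if desc_size works out to 32 bytes and the user asks
	 * for descs_pool_size = 1024, mem_size becomes 32 KiB; with, say, an
	 * 8 KiB CPPI internal RAM (desc_mem_size), desc_mem_phys is cleared
	 * above and the pool is then carved out of DDR via dma_alloc_coherent()
	 * below instead of being ioremapped from the on-chip RAM.
	 */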
	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
	if (IS_ERR(pool->gen_pool)) {
		ret = PTR_ERR(pool->gen_pool);
		dev_err(ctlr->dev, "pool create failed %d\n", ret);
		goto gen_pool_create_fail;

	if (cpdma_params->desc_mem_phys) {
		pool->phys = cpdma_params->desc_mem_phys;
		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
		pool->hw_addr = cpdma_params->desc_hw_addr;

		pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
						  &pool->hw_addr, GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */

		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
		dev_err(ctlr->dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
	struct cpdma_control_info *info = &controls[control];

	if (!ctlr->params.has_ext_regs)
	if (ctlr->state != CPDMA_STATE_ACTIVE)
	if (control < 0 || control >= ARRAY_SIZE(controls))
	if ((info->access & ACCESS_WO) != ACCESS_WO)

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
	struct cpdma_control_info *info = &controls[control];

	if (!ctlr->params.has_ext_regs)
	if (ctlr->state != CPDMA_STATE_ACTIVE)
	if (control < 0 || control >= ARRAY_SIZE(controls))
	if ((info->access & ACCESS_RO) != ACCESS_RO)

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
	struct cpdma_ctlr *ctlr = chan->ctlr;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);

static int cpdma_chan_on(struct cpdma_chan *chan)
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);

	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;

	chan_write(chan, hdp, desc_phys(pool, chan->head));

	chan_write(chan, rxfree, chan->count);

	spin_unlock_irqrestore(&chan->lock, flags);
/* cpdma_chan_fit_rate - set the rate for a channel and check that it is
 * possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success, a negative error code otherwise.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;

	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];

		new_rmask |= chan->mask;

	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 dividend, divisor;

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	dev_err(ctlr->dev, "The bus frequency is not set\n");

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		dividend = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		dividend = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

		if (prev_delta >= delta) {

	dividend = freq * idle_cnt;
	send_cnt = DIV_ROUND_CLOSEST_ULL(dividend, ch->rate);
	send_cnt -= idle_cnt;
	prev_delta = UINT_MAX;

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
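	/* Worked example with made-up numbers (bus_freq_mhz = 250 is only an
	 * assumption for the arithmetic): freq = 250 * 1000 * 32 = 8,000,000.
	 * For ch->rate = 100,000 Kb/s the loop starts with
	 * min_send_cnt = 7,900,000 and send_cnt = DIV_ROUND_UP(7,900,000,
	 * 100,000) = 79, which gives idle_cnt = 1 and new_rate =
	 * 8,000,000 * 1 / 80 = 100,000, i.e. delta = 0 on the first pass, so
	 * rate_factor becomes 79 | (1 << 16) = 0x1004f.
	 */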
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	if (cpdma_desc_pool_create(ctlr))

	/* split pool equally between RX/TX by default */
	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
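/* Hypothetical usage sketch (caller code, not part of this driver): a
 * consumer such as the CPSW glue code would typically fill in a struct
 * cpdma_params (dev, dmaregs, the per-direction hdp/cp/rxfree bases,
 * desc_mem_*, num_chan, bus_freq_mhz, ...) and then bring the engine up
 * roughly like this; "priv" and the handler names are the caller's own,
 * and IS_ERR() checks are omitted for brevity:
 *
 *	priv->dma = cpdma_ctlr_create(&dma_params);
 *	if (!priv->dma)
 *		return -ENOMEM;
 *	priv->txch = cpdma_chan_create(priv->dma, 0, my_tx_handler, 0);
 *	priv->rxch = cpdma_chan_create(priv->dma, 0, my_rx_handler, 1);
 *	cpdma_ctlr_start(priv->dma);
 */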
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
	struct cpdma_chan *chan;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)

	for (i = 0; i < ctlr->num_chan; i++) {
		writel(0, ctlr->params.txhdp + 4 * i);
		writel(0, ctlr->params.rxhdp + 4 * i);
		writel(0, ctlr->params.txcp + 4 * i);
		writel(0, ctlr->params.rxcp + 4 * i);

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		cpdma_chan_set_chan_shaper(chan);

		/* turn off fixed-priority mode if all TX channels are rate limited */
		if (is_tx_chan(chan) && !chan->rate)

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);

	spin_unlock_irqrestore(&ctlr->lock, flags);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
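/* Hypothetical interrupt-path sketch (caller code, not part of this file):
 * the masked interrupt status tells the handler which channels fired, and
 * the EOI write re-arms the interrupt line once processing is done. The
 * CPDMA_EOI_* values come from davinci_cpdma.h; "budget" is the caller's
 * NAPI budget and "priv" is the caller's own state:
 *
 *	unsigned long status = cpdma_ctrl_txchs_state(priv->dma);
 *	int ch;
 *
 *	for_each_set_bit(ch, &status, CPDMA_MAX_CHANNELS)
 *		cpdma_chan_process(priv->txch[ch], budget);
 *	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 */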
static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;

	min = rx_chan_num(0);
	max = rx_chan_num(CPDMA_MAX_CHANNELS);

	min = tx_chan_num(0);
	max = tx_chan_num(CPDMA_MAX_CHANNELS);

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];

		chan->desc_num = (chan->weight * desc_num) / 100;

		chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;

	most_chan->desc_num += desc_cnt;
/**
 * cpdma_chan_split_pool - Splits ctlr pool between all channels.
 * Has to be called under ctlr lock.
 */
int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];

		if (is_rx_chan(chan)) {
			rx_weight += chan->weight;

			tx_weight += chan->weight;

	if (rx_weight > 100 || tx_weight > 100)

	tx_desc_num = ctlr->num_tx_desc;
	rx_desc_num = ctlr->num_rx_desc;

	tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
	tx_per_ch_desc /= free_tx_num;

	rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
	rx_per_ch_desc /= free_rx_num;

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);
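	/* Worked example with assumed numbers: with num_rx_desc = 128 and two
	 * RX channels, one weighted 60% and one weighted 0%, the weighted
	 * channel gets (60 * 128) / 100 = 76 descriptors, the remaining
	 * 128 - 76 = 52 go to the unweighted one as rx_per_ch_desc, and any
	 * rounding leftover is handed to the largest channel inside
	 * cpdma_chan_set_descs() above.
	 */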
/* cpdma_chan_set_weight - set the weight of a channel, in percent.
 * TX and RX channels have separate weight budgets: 100% for RX and 100%
 * for TX. The weight is used to split cpdma resources, such as the number
 * of descriptors, between the channels in the proportion they require.
 * The channel rate alone is not enough to derive a weight, since the
 * maximum rate of the interface would also be needed.
 * If weight == 0, the channel shares whatever descriptors are left over
 * by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
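/* Hypothetical usage sketch (caller code): reserving roughly a tenth of the
 * TX descriptor budget for a management channel while the other TX channels
 * keep weight 0 and share the remainder; "priv" is the caller's own state:
 *
 *	ret = cpdma_chan_set_weight(priv->mgmt_txch, 10);
 *	if (ret)
 *		dev_warn(priv->dev, "failed to set channel weight\n");
 */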
/* cpdma_chan_get_min_rate - get the minimum allowed rate for a channel.
 * Should be called before cpdma_chan_set_rate.
 * Returns the minimum rate in Kb/s.
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
	unsigned int dividend, divisor;

	dividend = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(dividend, divisor);
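/* For example (bus_freq_mhz = 250 assumed only for the arithmetic):
 * dividend = 250 * 32 * 1000 = 8,000,000 and divisor = 16385, so the
 * minimum configurable shaper rate is DIV_ROUND_UP(8,000,000, 16385),
 * i.e. roughly 489 Kb/s.
 */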
/* cpdma_chan_set_rate - limit the bandwidth of a transmit channel.
 * The rate-limited channels have to be set up in order, beginning from
 * the lowest.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s; if 0, the shaper is turned off
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
	unsigned long flags, ch_flags;
	struct cpdma_ctlr *ctlr;

	if (!ch || !is_tx_chan(ch))

	if (ch->rate == rate)

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);

	ret = cpdma_chan_set_factors(ctlr, ch);

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
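/* Hypothetical usage sketch (caller code): cap a TX channel after checking
 * the requested rate against the minimum the shaper can express:
 *
 *	if (rate_kbps >= cpdma_chan_get_min_rate(priv->dma))
 *		ret = cpdma_chan_set_rate(priv->txch, rate_kbps);
 */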
u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
	spin_lock_irqsave(&ch->lock, flags);
	spin_unlock_irqrestore(&ch->lock, flags);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
	int offset = chan_num * 4;
	struct cpdma_chan *chan;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return ERR_PTR(-EINVAL);

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);

	chan->state = CPDMA_STATE_IDLE;
	chan->chan_num = chan_num;
	chan->handler = handler;

	if (is_rx_chan(chan)) {
		chan->hdp = ctlr->params.rxhdp + offset;
		chan->cp = ctlr->params.rxcp + offset;
		chan->rxfree = ctlr->params.rxfree + offset;
		chan->int_set = CPDMA_RXINTMASKSET;
		chan->int_clear = CPDMA_RXINTMASKCLEAR;
		chan->td = CPDMA_RXTEARDOWN;
		chan->dir = DMA_FROM_DEVICE;

		chan->hdp = ctlr->params.txhdp + offset;
		chan->cp = ctlr->params.txcp + offset;
		chan->int_set = CPDMA_TXINTMASKSET;
		chan->int_clear = CPDMA_TXINTMASKCLEAR;
		chan->td = CPDMA_TXTEARDOWN;
		chan->dir = DMA_TO_DEVICE;

	chan->mask = BIT(chan_linear(chan));
	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

int cpdma_chan_destroy(struct cpdma_chan *chan)
	struct cpdma_ctlr *ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;

	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	chan->stats.head_enqueue++;
	if (chan->state == CPDMA_STATE_ACTIVE)
		chan_write(chan, hdp, desc_dma);

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
static int cpdma_chan_submit_si(struct submit_info *si)
	struct cpdma_chan *chan = si->chan;
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;

	desc = cpdma_desc_alloc(ctlr->pool);
		chan->stats.desc_alloc_fail++;

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;

	buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
		cpdma_desc_free(ctlr->pool, desc, 1);

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, si->directed);
	/* Relaxed IO accessors can be used here as there is a read barrier
	 * at the end of the write sequence.
	 */
	writel_relaxed(0, &desc->hw_next);
	writel_relaxed(buffer, &desc->hw_buffer);
	writel_relaxed(len, &desc->hw_len);
	writel_relaxed(mode | len, &desc->hw_mode);
	writel_relaxed((uintptr_t)si->token, &desc->sw_token);
	writel_relaxed(buffer, &desc->sw_buffer);
	writel_relaxed(len, &desc->sw_len);
	desc_read(desc, sw_len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);
int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
			   int len, int directed)
	struct submit_info si;
	unsigned long flags;

	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
	struct submit_info si;
	unsigned long flags;

	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
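/* Hypothetical usage sketch (caller code): queueing an skb for transmit on
 * an active channel, and pre-filling an RX channel with a fresh buffer
 * before it is started. The skb is passed as the token so the completion
 * handler gets it back together with the reported length and status:
 *
 *	ret = cpdma_chan_submit(priv->txch, skb, skb->data, skb->len, 0);
 *
 *	ret = cpdma_chan_idle_submit(priv->rxch, new_skb, new_skb->data,
 *				     priv->rx_buf_len, 0);
 */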
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;

	token = desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)((void *)token, outlen, status);

static int __cpdma_chan_process(struct cpdma_chan *chan)
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	chan->stats.empty_dequeue++;

	desc_dma = desc_phys(pool, desc);

	status = desc_read(desc, hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;

	__cpdma_chan_free(chan, desc, outlen, cb_status);

	spin_unlock_irqrestore(&chan->lock, flags);

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)

	while (used < quota) {
		ret = __cpdma_chan_process(chan);

int cpdma_chan_start(struct cpdma_chan *chan)
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	ret = cpdma_chan_on(chan);

int cpdma_chan_stop(struct cpdma_chan *chan)
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)

	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
		ret = __cpdma_chan_process(chan);
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
	spin_unlock_irqrestore(&chan->lock, flags);

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
	unsigned long flags;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
	unsigned long flags;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
	return ctlr->num_rx_desc;

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
	return ctlr->num_tx_desc;

void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
	ctlr->num_rx_desc = num_rx_desc;
	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;