drivers/net/ethernet/ti/davinci_cpdma.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Texas Instruments CPDMA Driver
4  *
5  * Copyright (C) 2010 Texas Instruments
6  *
7  */
8 #include <linux/kernel.h>
9 #include <linux/spinlock.h>
10 #include <linux/device.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/err.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/io.h>
16 #include <linux/delay.h>
17 #include <linux/genalloc.h>
18 #include "davinci_cpdma.h"
19
20 /* DMA Registers */
21 #define CPDMA_TXIDVER           0x00
22 #define CPDMA_TXCONTROL         0x04
23 #define CPDMA_TXTEARDOWN        0x08
24 #define CPDMA_RXIDVER           0x10
25 #define CPDMA_RXCONTROL         0x14
26 #define CPDMA_SOFTRESET         0x1c
27 #define CPDMA_RXTEARDOWN        0x18
28 #define CPDMA_TX_PRI0_RATE      0x30
29 #define CPDMA_TXINTSTATRAW      0x80
30 #define CPDMA_TXINTSTATMASKED   0x84
31 #define CPDMA_TXINTMASKSET      0x88
32 #define CPDMA_TXINTMASKCLEAR    0x8c
33 #define CPDMA_MACINVECTOR       0x90
34 #define CPDMA_MACEOIVECTOR      0x94
35 #define CPDMA_RXINTSTATRAW      0xa0
36 #define CPDMA_RXINTSTATMASKED   0xa4
37 #define CPDMA_RXINTMASKSET      0xa8
38 #define CPDMA_RXINTMASKCLEAR    0xac
39 #define CPDMA_DMAINTSTATRAW     0xb0
40 #define CPDMA_DMAINTSTATMASKED  0xb4
41 #define CPDMA_DMAINTMASKSET     0xb8
42 #define CPDMA_DMAINTMASKCLEAR   0xbc
43 #define CPDMA_DMAINT_HOSTERR    BIT(1)
44
45 /* the following exist only if has_ext_regs is set */
46 #define CPDMA_DMACONTROL        0x20
47 #define CPDMA_DMASTATUS         0x24
48 #define CPDMA_RXBUFFOFS         0x28
49 #define CPDMA_EM_CONTROL        0x2c
50
51 /* Descriptor mode bits */
52 #define CPDMA_DESC_SOP          BIT(31)
53 #define CPDMA_DESC_EOP          BIT(30)
54 #define CPDMA_DESC_OWNER        BIT(29)
55 #define CPDMA_DESC_EOQ          BIT(28)
56 #define CPDMA_DESC_TD_COMPLETE  BIT(27)
57 #define CPDMA_DESC_PASS_CRC     BIT(26)
58 #define CPDMA_DESC_TO_PORT_EN   BIT(20)
59 #define CPDMA_TO_PORT_SHIFT     16
60 #define CPDMA_DESC_PORT_MASK    (BIT(18) | BIT(17) | BIT(16))
61 #define CPDMA_DESC_CRC_LEN      4
62
63 #define CPDMA_TEARDOWN_VALUE    0xfffffffc
64
65 #define CPDMA_MAX_RLIM_CNT      16384
66
67 struct cpdma_desc {
68         /* hardware fields */
69         u32                     hw_next;
70         u32                     hw_buffer;
71         u32                     hw_len;
72         u32                     hw_mode;
73         /* software fields */
74         void                    *sw_token;
75         u32                     sw_buffer;
76         u32                     sw_len;
77 };
78
79 struct cpdma_desc_pool {
80         phys_addr_t             phys;
81         dma_addr_t              hw_addr;
82         void __iomem            *iomap;         /* ioremap map */
83         void                    *cpumap;        /* dma_alloc map */
84         int                     desc_size, mem_size;
85         int                     num_desc;
86         struct device           *dev;
87         struct gen_pool         *gen_pool;
88 };
89
90 enum cpdma_state {
91         CPDMA_STATE_IDLE,
92         CPDMA_STATE_ACTIVE,
93         CPDMA_STATE_TEARDOWN,
94 };
95
96 struct cpdma_ctlr {
97         enum cpdma_state        state;
98         struct cpdma_params     params;
99         struct device           *dev;
100         struct cpdma_desc_pool  *pool;
101         spinlock_t              lock;
102         struct cpdma_chan       *channels[2 * CPDMA_MAX_CHANNELS];
103         int chan_num;
104         int                     num_rx_desc; /* RX descriptors number */
105         int                     num_tx_desc; /* TX descriptors number */
106 };
107
108 struct cpdma_chan {
109         struct cpdma_desc __iomem       *head, *tail;
110         void __iomem                    *hdp, *cp, *rxfree;
111         enum cpdma_state                state;
112         struct cpdma_ctlr               *ctlr;
113         int                             chan_num;
114         spinlock_t                      lock;
115         int                             count;
116         u32                             desc_num;
117         u32                             mask;
118         cpdma_handler_fn                handler;
119         enum dma_data_direction         dir;
120         struct cpdma_chan_stats         stats;
121         /* offsets into dmaregs */
122         int     int_set, int_clear, td;
123         int                             weight;
124         u32                             rate_factor;
125         u32                             rate;
126 };
127
128 struct cpdma_control_info {
129         u32             reg;
130         u32             shift, mask;
131         int             access;
132 #define ACCESS_RO       BIT(0)
133 #define ACCESS_WO       BIT(1)
134 #define ACCESS_RW       (ACCESS_RO | ACCESS_WO)
135 };
136
137 struct submit_info {
138         struct cpdma_chan *chan;
139         int directed;
140         void *token;
141         void *data;
142         int len;
143 };
144
145 static struct cpdma_control_info controls[] = {
146         [CPDMA_TX_RLIM]           = {CPDMA_DMACONTROL,  8,  0xffff, ACCESS_RW},
147         [CPDMA_CMD_IDLE]          = {CPDMA_DMACONTROL,  3,  1,      ACCESS_WO},
148         [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,  4,  1,      ACCESS_RW},
149         [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,  2,  1,      ACCESS_RW},
150         [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,  1,  1,      ACCESS_RW},
151         [CPDMA_TX_PRIO_FIXED]     = {CPDMA_DMACONTROL,  0,  1,      ACCESS_RW},
152         [CPDMA_STAT_IDLE]         = {CPDMA_DMASTATUS,   31, 1,      ACCESS_RO},
153         [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,   20, 0xf,    ACCESS_RW},
154         [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,   16, 0x7,    ACCESS_RW},
155         [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,   12, 0xf,    ACCESS_RW},
156         [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,   8,  0x7,    ACCESS_RW},
157         [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,   0,  0xffff, ACCESS_RW},
158 };
159
160 #define tx_chan_num(chan)       (chan)
161 #define rx_chan_num(chan)       ((chan) + CPDMA_MAX_CHANNELS)
162 #define is_rx_chan(chan)        ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
163 #define is_tx_chan(chan)        (!is_rx_chan(chan))
164 #define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
165 #define chan_linear(chan)       __chan_linear((chan)->chan_num)
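/* For illustration, how the numbering macros above map a hardware channel to
 * a slot in ctlr->channels[]: tx channels occupy the first CPDMA_MAX_CHANNELS
 * entries and rx channels the second half, e.g.
 *
 *   tx_chan_num(3) == 3
 *   rx_chan_num(3) == 3 + CPDMA_MAX_CHANNELS
 *   __chan_linear(rx_chan_num(3)) == 3   (the hardware channel index again)
 *
 * is_rx_chan()/is_tx_chan() simply test which half of the array chan_num
 * falls in, while chan_linear() recovers the hardware index.
 */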
166
167 /* The following make access to common cpdma_ctlr params more readable */
168 #define dmaregs         params.dmaregs
169 #define num_chan        params.num_chan
170
171 /* various accessors */
172 #define dma_reg_read(ctlr, ofs)         readl((ctlr)->dmaregs + (ofs))
173 #define chan_read(chan, fld)            readl((chan)->fld)
174 #define desc_read(desc, fld)            readl(&(desc)->fld)
175 #define dma_reg_write(ctlr, ofs, v)     writel(v, (ctlr)->dmaregs + (ofs))
176 #define chan_write(chan, fld, v)        writel(v, (chan)->fld)
177 #define desc_write(desc, fld, v)        writel((u32)(v), &(desc)->fld)
178
179 #define cpdma_desc_to_port(chan, mode, directed)                        \
180         do {                                                            \
181                 if (!is_rx_chan(chan) && ((directed == 1) ||            \
182                                           (directed == 2)))             \
183                         mode |= (CPDMA_DESC_TO_PORT_EN |                \
184                                  (directed << CPDMA_TO_PORT_SHIFT));    \
185         } while (0)
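/* Example of what cpdma_desc_to_port() does, derived only from the macro
 * above: for a descriptor on a tx channel submitted with directed == 2, the
 * mode word gains
 *
 *   CPDMA_DESC_TO_PORT_EN | (2 << CPDMA_TO_PORT_SHIFT)
 *
 * i.e. the port-enable bit plus port 2 in the CPDMA_DESC_PORT_MASK field
 * (bits 18:16). For rx channels, or for directed values other than 1 and 2,
 * the mode word is left untouched.
 */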
186
187 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
188 {
189         struct cpdma_desc_pool *pool = ctlr->pool;
190
191         if (!pool)
192                 return;
193
194         WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
195              "cpdma_desc_pool size %zd != avail %zd",
196              gen_pool_size(pool->gen_pool),
197              gen_pool_avail(pool->gen_pool));
198         if (pool->cpumap)
199                 dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
200                                   pool->phys);
201 }
202
203 /*
204  * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
205  * emac) have dedicated on-chip memory for these descriptors.  Some other
206  * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
207  * abstract out these details.
208  */
209 static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
210 {
211         struct cpdma_params *cpdma_params = &ctlr->params;
212         struct cpdma_desc_pool *pool;
213         int ret = -ENOMEM;
214
215         pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
216         if (!pool)
217                 goto gen_pool_create_fail;
218         ctlr->pool = pool;
219
220         pool->mem_size  = cpdma_params->desc_mem_size;
221         pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
222                                 cpdma_params->desc_align);
223         pool->num_desc  = pool->mem_size / pool->desc_size;
224
225         if (cpdma_params->descs_pool_size) {
226         /* Recalculate the memory size required for the cpdma
227          * descriptor pool based on the number of descriptors
228          * specified by the user; if that size exceeds the CPPI
229          * internal RAM size (desc_mem_size), switch to DDR.
230          */
231                 pool->num_desc = cpdma_params->descs_pool_size;
232                 pool->mem_size = pool->desc_size * pool->num_desc;
233                 if (pool->mem_size > cpdma_params->desc_mem_size)
234                         cpdma_params->desc_mem_phys = 0;
235         }
236
237         pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
238                                               -1, "cpdma");
239         if (IS_ERR(pool->gen_pool)) {
240                 ret = PTR_ERR(pool->gen_pool);
241                 dev_err(ctlr->dev, "pool create failed %d\n", ret);
242                 goto gen_pool_create_fail;
243         }
244
245         if (cpdma_params->desc_mem_phys) {
246                 pool->phys  = cpdma_params->desc_mem_phys;
247                 pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
248                                            pool->mem_size);
249                 pool->hw_addr = cpdma_params->desc_hw_addr;
250         } else {
251                 pool->cpumap = dma_alloc_coherent(ctlr->dev,  pool->mem_size,
252                                                   &pool->hw_addr, GFP_KERNEL);
253                 pool->iomap = (void __iomem __force *)pool->cpumap;
254                 pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
255         }
256
257         if (!pool->iomap)
258                 goto gen_pool_create_fail;
259
260         ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
261                                 pool->phys, pool->mem_size, -1);
262         if (ret < 0) {
263                 dev_err(ctlr->dev, "pool add failed %d\n", ret);
264                 goto gen_pool_add_virt_fail;
265         }
266
267         return 0;
268
269 gen_pool_add_virt_fail:
270         cpdma_desc_pool_destroy(ctlr);
271 gen_pool_create_fail:
272         ctlr->pool = NULL;
273         return ret;
274 }
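/* A minimal sketch of how a platform driver might describe the descriptor
 * pool to cpdma_desc_pool_create() through struct cpdma_params. Only fields
 * read by this file are shown, and the addresses/sizes are invented for this
 * comment rather than taken from any real SoC:
 *
 *   struct cpdma_params params = {
 *           .dev             = dev,
 *           .desc_mem_size   = 8192,        (CPPI internal RAM size)
 *           .desc_mem_phys   = 0x4a102000,  (non-zero: use the on-chip RAM)
 *           .desc_hw_addr    = 0x4a102000,  (same RAM as seen by the DMA)
 *           .desc_align      = 16,
 *           .descs_pool_size = 0,           (0: size the pool from the RAM)
 *   };
 *
 * Requesting a descs_pool_size whose footprint exceeds desc_mem_size makes
 * the code above clear desc_mem_phys and fall back to a dma_alloc_coherent()
 * pool in DDR instead of the on-chip RAM.
 */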
275
276 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
277                   struct cpdma_desc __iomem *desc)
278 {
279         if (!desc)
280                 return 0;
281         return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
282 }
283
284 static inline struct cpdma_desc __iomem *
285 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
286 {
287         return dma ? pool->iomap + dma - pool->hw_addr : NULL;
288 }
289
290 static struct cpdma_desc __iomem *
291 cpdma_desc_alloc(struct cpdma_desc_pool *pool)
292 {
293         return (struct cpdma_desc __iomem *)
294                 gen_pool_alloc(pool->gen_pool, pool->desc_size);
295 }
296
297 static void cpdma_desc_free(struct cpdma_desc_pool *pool,
298                             struct cpdma_desc __iomem *desc, int num_desc)
299 {
300         gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
301 }
302
303 static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
304 {
305         struct cpdma_control_info *info = &controls[control];
306         u32 val;
307
308         if (!ctlr->params.has_ext_regs)
309                 return -ENOTSUPP;
310
311         if (ctlr->state != CPDMA_STATE_ACTIVE)
312                 return -EINVAL;
313
314         if (control < 0 || control >= ARRAY_SIZE(controls))
315                 return -ENOENT;
316
317         if ((info->access & ACCESS_WO) != ACCESS_WO)
318                 return -EPERM;
319
320         val  = dma_reg_read(ctlr, info->reg);
321         val &= ~(info->mask << info->shift);
322         val |= (value & info->mask) << info->shift;
323         dma_reg_write(ctlr, info->reg, val);
324
325         return 0;
326 }
327
328 static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
329 {
330         struct cpdma_control_info *info = &controls[control];
331         int ret;
332
333         if (!ctlr->params.has_ext_regs)
334                 return -ENOTSUPP;
335
336         if (ctlr->state != CPDMA_STATE_ACTIVE)
337                 return -EINVAL;
338
339         if (control < 0 || control >= ARRAY_SIZE(controls))
340                 return -ENOENT;
341
342         if ((info->access & ACCESS_RO) != ACCESS_RO)
343                 return -EPERM;
344
345         ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
346         return ret;
347 }
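/* Worked example for the two accessors above, using only the controls[]
 * table in this file: CPDMA_STAT_TX_ERR_CODE is {CPDMA_DMASTATUS, 20, 0xf},
 * so _cpdma_control_get(ctlr, CPDMA_STAT_TX_ERR_CODE) reads DMASTATUS and
 * returns (val >> 20) & 0xf. Likewise CPDMA_TX_PRIO_FIXED is
 * {CPDMA_DMACONTROL, 0, 1}, so _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, 1)
 * does a read-modify-write that only touches bit 0 of DMACONTROL. Both paths
 * require has_ext_regs and an ACTIVE controller, otherwise they fail with
 * -ENOTSUPP / -EINVAL as coded above.
 */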
348
349 /* cpdma_chan_set_chan_shaper - set shaper for a channel
350  * Has to be called under ctlr lock
351  */
352 static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
353 {
354         struct cpdma_ctlr *ctlr = chan->ctlr;
355         u32 rate_reg;
356         u32 rmask;
357         int ret;
358
359         if (!chan->rate)
360                 return 0;
361
362         rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
363         dma_reg_write(ctlr, rate_reg, chan->rate_factor);
364
365         rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
366         rmask |= chan->mask;
367
368         ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
369         return ret;
370 }
371
372 static int cpdma_chan_on(struct cpdma_chan *chan)
373 {
374         struct cpdma_ctlr *ctlr = chan->ctlr;
375         struct cpdma_desc_pool  *pool = ctlr->pool;
376         unsigned long flags;
377
378         spin_lock_irqsave(&chan->lock, flags);
379         if (chan->state != CPDMA_STATE_IDLE) {
380                 spin_unlock_irqrestore(&chan->lock, flags);
381                 return -EBUSY;
382         }
383         if (ctlr->state != CPDMA_STATE_ACTIVE) {
384                 spin_unlock_irqrestore(&chan->lock, flags);
385                 return -EINVAL;
386         }
387         dma_reg_write(ctlr, chan->int_set, chan->mask);
388         chan->state = CPDMA_STATE_ACTIVE;
389         if (chan->head) {
390                 chan_write(chan, hdp, desc_phys(pool, chan->head));
391                 if (chan->rxfree)
392                         chan_write(chan, rxfree, chan->count);
393         }
394
395         spin_unlock_irqrestore(&chan->lock, flags);
396         return 0;
397 }
398
399 /* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
400  * rmask - mask of rate limited channels
401  * Returns 0 on success, or -EINVAL if the rate limited channels are not in order.
402  */
403 static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
404                                u32 *rmask, int *prio_mode)
405 {
406         struct cpdma_ctlr *ctlr = ch->ctlr;
407         struct cpdma_chan *chan;
408         u32 old_rate = ch->rate;
409         u32 new_rmask = 0;
410         int rlim = 0;
411         int i;
412
413         for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
414                 chan = ctlr->channels[i];
415                 if (!chan)
416                         continue;
417
418                 if (chan == ch)
419                         chan->rate = rate;
420
421                 if (chan->rate) {
422                         rlim = 1;
423                         new_rmask |= chan->mask;
424                         continue;
425                 }
426
427                 if (rlim)
428                         goto err;
429         }
430
431         *rmask = new_rmask;
432         *prio_mode = rlim;
433         return 0;
434
435 err:
436         ch->rate = old_rate;
437         dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
438                 chan->chan_num);
439         return -EINVAL;
440 }
441
442 static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
443                                   struct cpdma_chan *ch)
444 {
445         u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
446         u32 best_send_cnt = 0, best_idle_cnt = 0;
447         u32 new_rate, best_rate = 0, rate_reg;
448         u64 send_cnt, idle_cnt;
449         u32 min_send_cnt, freq;
450         u64 divident, divisor;
451
452         if (!ch->rate) {
453                 ch->rate_factor = 0;
454                 goto set_factor;
455         }
456
457         freq = ctlr->params.bus_freq_mhz * 1000 * 32;
458         if (!freq) {
459                 dev_err(ctlr->dev, "The bus frequency is not set\n");
460                 return -EINVAL;
461         }
462
463         min_send_cnt = freq - ch->rate;
464         send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
465         while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
466                 divident = ch->rate * send_cnt;
467                 divisor = min_send_cnt;
468                 idle_cnt = DIV_ROUND_CLOSEST_ULL(divident, divisor);
469
470                 divident = freq * idle_cnt;
471                 divisor = idle_cnt + send_cnt;
472                 new_rate = DIV_ROUND_CLOSEST_ULL(divident, divisor);
473
474                 delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
475                 if (delta < best_delta) {
476                         best_delta = delta;
477                         best_send_cnt = send_cnt;
478                         best_idle_cnt = idle_cnt;
479                         best_rate = new_rate;
480
481                         if (!delta)
482                                 break;
483                 }
484
485                 if (prev_delta >= delta) {
486                         prev_delta = delta;
487                         send_cnt++;
488                         continue;
489                 }
490
491                 idle_cnt++;
492                 divident = freq * idle_cnt;
493                 send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
494                 send_cnt -= idle_cnt;
495                 prev_delta = UINT_MAX;
496         }
497
498         ch->rate = best_rate;
499         ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);
500
501 set_factor:
502         rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
503         dma_reg_write(ctlr, rate_reg, ch->rate_factor);
504         return 0;
505 }
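/* The search loop above looks for a (send_cnt, idle_cnt) pair whose resulting
 * rate, new_rate = freq * idle_cnt / (idle_cnt + send_cnt), is closest to the
 * requested ch->rate without falling below it, then packs the winner as
 * rate_factor = send_cnt | (idle_cnt << 16). As a purely arithmetic
 * illustration (numbers chosen for this comment, not measured on hardware):
 * with a 350 MHz bus, freq = 350 * 1000 * 32 = 11200000 Kb/s, and a pair of
 * idle_cnt = 1, send_cnt = 10 corresponds to a shaped rate of roughly
 * 11200000 / 11 ~= 1018182 Kb/s.
 */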
506
507 struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
508 {
509         struct cpdma_ctlr *ctlr;
510
511         ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
512         if (!ctlr)
513                 return NULL;
514
515         ctlr->state = CPDMA_STATE_IDLE;
516         ctlr->params = *params;
517         ctlr->dev = params->dev;
518         ctlr->chan_num = 0;
519         spin_lock_init(&ctlr->lock);
520
521         if (cpdma_desc_pool_create(ctlr))
522                 return NULL;
523         /* split pool equally between RX/TX by default */
524         ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
525         ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
526
527         if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
528                 ctlr->num_chan = CPDMA_MAX_CHANNELS;
529         return ctlr;
530 }
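/* A rough sketch of the call sequence a client driver (e.g. the cpsw or
 * davinci_emac glue) builds around this API; error handling is omitted and
 * the variable names are invented for this comment:
 *
 *   dma  = cpdma_ctlr_create(&params);
 *   txch = cpdma_chan_create(dma, 0, tx_handler, 0);   (tx channel 0)
 *   rxch = cpdma_chan_create(dma, 0, rx_handler, 1);   (rx channel 0)
 *   cpdma_ctlr_start(dma);
 *   cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *   ...
 *   cpdma_ctlr_stop(dma);
 *   cpdma_ctlr_destroy(dma);
 *
 * Completions are delivered through the cpdma_handler_fn passed to
 * cpdma_chan_create(), typically driven by cpdma_chan_process() from the
 * caller's NAPI poll or interrupt path.
 */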
531
532 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
533 {
534         struct cpdma_chan *chan;
535         unsigned long flags;
536         int i, prio_mode;
537
538         spin_lock_irqsave(&ctlr->lock, flags);
539         if (ctlr->state != CPDMA_STATE_IDLE) {
540                 spin_unlock_irqrestore(&ctlr->lock, flags);
541                 return -EBUSY;
542         }
543
544         if (ctlr->params.has_soft_reset) {
545                 unsigned timeout = 10 * 100;
546
547                 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
548                 while (timeout) {
549                         if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
550                                 break;
551                         udelay(10);
552                         timeout--;
553                 }
554                 WARN_ON(!timeout);
555         }
556
557         for (i = 0; i < ctlr->num_chan; i++) {
558                 writel(0, ctlr->params.txhdp + 4 * i);
559                 writel(0, ctlr->params.rxhdp + 4 * i);
560                 writel(0, ctlr->params.txcp + 4 * i);
561                 writel(0, ctlr->params.rxcp + 4 * i);
562         }
563
564         dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
565         dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
566
567         dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
568         dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
569
570         ctlr->state = CPDMA_STATE_ACTIVE;
571
572         prio_mode = 0;
573         for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
574                 chan = ctlr->channels[i];
575                 if (chan) {
576                         cpdma_chan_set_chan_shaper(chan);
577                         cpdma_chan_on(chan);
578
579                         /* prio mode stays off only if all tx channels are rate limited */
580                         if (is_tx_chan(chan) && !chan->rate)
581                                 prio_mode = 1;
582                 }
583         }
584
585         _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
586         _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);
587
588         spin_unlock_irqrestore(&ctlr->lock, flags);
589         return 0;
590 }
591
592 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
593 {
594         unsigned long flags;
595         int i;
596
597         spin_lock_irqsave(&ctlr->lock, flags);
598         if (ctlr->state != CPDMA_STATE_ACTIVE) {
599                 spin_unlock_irqrestore(&ctlr->lock, flags);
600                 return -EINVAL;
601         }
602
603         ctlr->state = CPDMA_STATE_TEARDOWN;
604         spin_unlock_irqrestore(&ctlr->lock, flags);
605
606         for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
607                 if (ctlr->channels[i])
608                         cpdma_chan_stop(ctlr->channels[i]);
609         }
610
611         spin_lock_irqsave(&ctlr->lock, flags);
612         dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
613         dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
614
615         dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
616         dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
617
618         ctlr->state = CPDMA_STATE_IDLE;
619
620         spin_unlock_irqrestore(&ctlr->lock, flags);
621         return 0;
622 }
623
624 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
625 {
626         int ret = 0, i;
627
628         if (!ctlr)
629                 return -EINVAL;
630
631         if (ctlr->state != CPDMA_STATE_IDLE)
632                 cpdma_ctlr_stop(ctlr);
633
634         for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
635                 cpdma_chan_destroy(ctlr->channels[i]);
636
637         cpdma_desc_pool_destroy(ctlr);
638         return ret;
639 }
640
641 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
642 {
643         unsigned long flags;
644         int i;
645
646         spin_lock_irqsave(&ctlr->lock, flags);
647         if (ctlr->state != CPDMA_STATE_ACTIVE) {
648                 spin_unlock_irqrestore(&ctlr->lock, flags);
649                 return -EINVAL;
650         }
651
652         for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
653                 if (ctlr->channels[i])
654                         cpdma_chan_int_ctrl(ctlr->channels[i], enable);
655         }
656
657         spin_unlock_irqrestore(&ctlr->lock, flags);
658         return 0;
659 }
660
661 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
662 {
663         dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
664 }
665
666 u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
667 {
668         return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
669 }
670
671 u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
672 {
673         return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
674 }
675
676 static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
677                                  int rx, int desc_num,
678                                  int per_ch_desc)
679 {
680         struct cpdma_chan *chan, *most_chan = NULL;
681         int desc_cnt = desc_num;
682         int most_dnum = 0;
683         int min, max, i;
684
685         if (!desc_num)
686                 return;
687
688         if (rx) {
689                 min = rx_chan_num(0);
690                 max = rx_chan_num(CPDMA_MAX_CHANNELS);
691         } else {
692                 min = tx_chan_num(0);
693                 max = tx_chan_num(CPDMA_MAX_CHANNELS);
694         }
695
696         for (i = min; i < max; i++) {
697                 chan = ctlr->channels[i];
698                 if (!chan)
699                         continue;
700
701                 if (chan->weight)
702                         chan->desc_num = (chan->weight * desc_num) / 100;
703                 else
704                         chan->desc_num = per_ch_desc;
705
706                 desc_cnt -= chan->desc_num;
707
708                 if (most_dnum < chan->desc_num) {
709                         most_dnum = chan->desc_num;
710                         most_chan = chan;
711                 }
712         }
713         /* hand any remaining descriptors to the channel with the largest share */
714         if (most_chan)
715                 most_chan->desc_num += desc_cnt;
716 }
717
718 /**
719  * cpdma_chan_split_pool - Splits the ctlr descriptor pool between all channels.
720  * Has to be called under the ctlr lock.
721  */
722 int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
723 {
724         int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
725         int free_rx_num = 0, free_tx_num = 0;
726         int rx_weight = 0, tx_weight = 0;
727         int tx_desc_num, rx_desc_num;
728         struct cpdma_chan *chan;
729         int i;
730
731         if (!ctlr->chan_num)
732                 return 0;
733
734         for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
735                 chan = ctlr->channels[i];
736                 if (!chan)
737                         continue;
738
739                 if (is_rx_chan(chan)) {
740                         if (!chan->weight)
741                                 free_rx_num++;
742                         rx_weight += chan->weight;
743                 } else {
744                         if (!chan->weight)
745                                 free_tx_num++;
746                         tx_weight += chan->weight;
747                 }
748         }
749
750         if (rx_weight > 100 || tx_weight > 100)
751                 return -EINVAL;
752
753         tx_desc_num = ctlr->num_tx_desc;
754         rx_desc_num = ctlr->num_rx_desc;
755
756         if (free_tx_num) {
757                 tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
758                 tx_per_ch_desc /= free_tx_num;
759         }
760         if (free_rx_num) {
761                 rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
762                 rx_per_ch_desc /= free_rx_num;
763         }
764
765         cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
766         cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);
767
768         return 0;
769 }
770
771
772 /* cpdma_chan_set_weight - set the weight of a channel as a percentage.
773  * Tx and Rx channels have separate weight budgets: 100% for Rx
774  * and 100% for Tx. The weight is used to split cpdma resources,
775  * including the number of descriptors, between the channels in the
776  * proportion they require. The channel rate alone is not enough to
777  * derive a channel's weight; the maximum rate of the interface is also needed.
778  * If weight = 0, the channel uses whatever descriptors are left over
779  * by the weighted channels.
780  */
781 int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
782 {
783         struct cpdma_ctlr *ctlr = ch->ctlr;
784         unsigned long flags, ch_flags;
785         int ret;
786
787         spin_lock_irqsave(&ctlr->lock, flags);
788         spin_lock_irqsave(&ch->lock, ch_flags);
789         if (ch->weight == weight) {
790                 spin_unlock_irqrestore(&ch->lock, ch_flags);
791                 spin_unlock_irqrestore(&ctlr->lock, flags);
792                 return 0;
793         }
794         ch->weight = weight;
795         spin_unlock_irqrestore(&ch->lock, ch_flags);
796
797         /* re-split pool using new channel weight */
798         ret = cpdma_chan_split_pool(ctlr);
799         spin_unlock_irqrestore(&ctlr->lock, flags);
800         return ret;
801 }
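/* Numeric example of the split performed by cpdma_chan_split_pool() and
 * cpdma_chan_set_descs() when a weight changes (counts invented for this
 * comment): with ctlr->num_tx_desc = 128 and two tx channels, one of weight
 * 50 and one of weight 0, the weighted channel gets 50 * 128 / 100 = 64
 * descriptors, the free channel gets tx_per_ch_desc = (128 - 64) / 1 = 64,
 * and any rounding remainder is handed to the channel that already holds the
 * largest share.
 */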
802
803 /* cpdma_chan_get_min_rate - get minimum allowed rate for channel
804  * Should be called before cpdma_chan_set_rate.
805  * Returns min rate in Kb/s
806  */
807 u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
808 {
809         unsigned int divident, divisor;
810
811         divident = ctlr->params.bus_freq_mhz * 32 * 1000;
812         divisor = 1 + CPDMA_MAX_RLIM_CNT;
813
814         return DIV_ROUND_UP(divident, divisor);
815 }
816
817 /* cpdma_chan_set_rate - limit the bandwidth of a transmit channel.
818  * The bandwidth limited channels have to be in order, beginning from the lowest.
819  * ch - transmit channel the bandwidth is configured for
820  * rate - bandwidth in Kb/s; if 0, the shaper is turned off
821  */
822 int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
823 {
824         unsigned long flags, ch_flags;
825         struct cpdma_ctlr *ctlr;
826         int ret, prio_mode;
827         u32 rmask;
828
829         if (!ch || !is_tx_chan(ch))
830                 return -EINVAL;
831
832         if (ch->rate == rate)
833                 return rate;
834
835         ctlr = ch->ctlr;
836         spin_lock_irqsave(&ctlr->lock, flags);
837         spin_lock_irqsave(&ch->lock, ch_flags);
838
839         ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
840         if (ret)
841                 goto err;
842
843         ret = cpdma_chan_set_factors(ctlr, ch);
844         if (ret)
845                 goto err;
846
847         spin_unlock_irqrestore(&ch->lock, ch_flags);
848
849         /* turn the shapers on */
850         _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
851         _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
852         spin_unlock_irqrestore(&ctlr->lock, flags);
853         return ret;
854
855 err:
856         spin_unlock_irqrestore(&ch->lock, ch_flags);
857         spin_unlock_irqrestore(&ctlr->lock, flags);
858         return ret;
859 }
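/* Example of the ordering rule that cpdma_chan_fit_rate() enforces for the
 * function above (channel numbers are illustrative): scanning the tx channel
 * slots upwards from 0, once a rate-limited channel has been seen every later
 * allocated channel must be rate limited as well. Leaving ch0 unshaped while
 * ch1 has a rate is accepted; giving ch0 a rate while ch1 stays unshaped is
 * rejected with "Upper cpdma ch1 is not rate limited".
 */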
860
861 u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
862 {
863         unsigned long flags;
864         u32 rate;
865
866         spin_lock_irqsave(&ch->lock, flags);
867         rate = ch->rate;
868         spin_unlock_irqrestore(&ch->lock, flags);
869
870         return rate;
871 }
872
873 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
874                                      cpdma_handler_fn handler, int rx_type)
875 {
876         int offset = chan_num * 4;
877         struct cpdma_chan *chan;
878         unsigned long flags;
879
880         chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
881
882         if (__chan_linear(chan_num) >= ctlr->num_chan)
883                 return ERR_PTR(-EINVAL);
884
885         chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
886         if (!chan)
887                 return ERR_PTR(-ENOMEM);
888
889         spin_lock_irqsave(&ctlr->lock, flags);
890         if (ctlr->channels[chan_num]) {
891                 spin_unlock_irqrestore(&ctlr->lock, flags);
892                 devm_kfree(ctlr->dev, chan);
893                 return ERR_PTR(-EBUSY);
894         }
895
896         chan->ctlr      = ctlr;
897         chan->state     = CPDMA_STATE_IDLE;
898         chan->chan_num  = chan_num;
899         chan->handler   = handler;
900         chan->rate      = 0;
901         chan->weight    = 0;
902
903         if (is_rx_chan(chan)) {
904                 chan->hdp       = ctlr->params.rxhdp + offset;
905                 chan->cp        = ctlr->params.rxcp + offset;
906                 chan->rxfree    = ctlr->params.rxfree + offset;
907                 chan->int_set   = CPDMA_RXINTMASKSET;
908                 chan->int_clear = CPDMA_RXINTMASKCLEAR;
909                 chan->td        = CPDMA_RXTEARDOWN;
910                 chan->dir       = DMA_FROM_DEVICE;
911         } else {
912                 chan->hdp       = ctlr->params.txhdp + offset;
913                 chan->cp        = ctlr->params.txcp + offset;
914                 chan->int_set   = CPDMA_TXINTMASKSET;
915                 chan->int_clear = CPDMA_TXINTMASKCLEAR;
916                 chan->td        = CPDMA_TXTEARDOWN;
917                 chan->dir       = DMA_TO_DEVICE;
918         }
919         chan->mask = BIT(chan_linear(chan));
920
921         spin_lock_init(&chan->lock);
922
923         ctlr->channels[chan_num] = chan;
924         ctlr->chan_num++;
925
926         cpdma_chan_split_pool(ctlr);
927
928         spin_unlock_irqrestore(&ctlr->lock, flags);
929         return chan;
930 }
931
932 int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
933 {
934         unsigned long flags;
935         int desc_num;
936
937         spin_lock_irqsave(&chan->lock, flags);
938         desc_num = chan->desc_num;
939         spin_unlock_irqrestore(&chan->lock, flags);
940
941         return desc_num;
942 }
943
944 int cpdma_chan_destroy(struct cpdma_chan *chan)
945 {
946         struct cpdma_ctlr *ctlr;
947         unsigned long flags;
948
949         if (!chan)
950                 return -EINVAL;
951         ctlr = chan->ctlr;
952
953         spin_lock_irqsave(&ctlr->lock, flags);
954         if (chan->state != CPDMA_STATE_IDLE)
955                 cpdma_chan_stop(chan);
956         ctlr->channels[chan->chan_num] = NULL;
957         ctlr->chan_num--;
958         devm_kfree(ctlr->dev, chan);
959         cpdma_chan_split_pool(ctlr);
960
961         spin_unlock_irqrestore(&ctlr->lock, flags);
962         return 0;
963 }
964
965 int cpdma_chan_get_stats(struct cpdma_chan *chan,
966                          struct cpdma_chan_stats *stats)
967 {
968         unsigned long flags;
969         if (!chan)
970                 return -EINVAL;
971         spin_lock_irqsave(&chan->lock, flags);
972         memcpy(stats, &chan->stats, sizeof(*stats));
973         spin_unlock_irqrestore(&chan->lock, flags);
974         return 0;
975 }
976
977 static void __cpdma_chan_submit(struct cpdma_chan *chan,
978                                 struct cpdma_desc __iomem *desc)
979 {
980         struct cpdma_ctlr               *ctlr = chan->ctlr;
981         struct cpdma_desc __iomem       *prev = chan->tail;
982         struct cpdma_desc_pool          *pool = ctlr->pool;
983         dma_addr_t                      desc_dma;
984         u32                             mode;
985
986         desc_dma = desc_phys(pool, desc);
987
988         /* simple case - idle channel */
989         if (!chan->head) {
990                 chan->stats.head_enqueue++;
991                 chan->head = desc;
992                 chan->tail = desc;
993                 if (chan->state == CPDMA_STATE_ACTIVE)
994                         chan_write(chan, hdp, desc_dma);
995                 return;
996         }
997
998         /* first chain the descriptor at the tail of the list */
999         desc_write(prev, hw_next, desc_dma);
1000         chan->tail = desc;
1001         chan->stats.tail_enqueue++;
1002
1003         /* next check if EOQ has been triggered already */
1004         mode = desc_read(prev, hw_mode);
1005         if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
1006             (chan->state == CPDMA_STATE_ACTIVE)) {
1007                 desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
1008                 chan_write(chan, hdp, desc_dma);
1009                 chan->stats.misqueued++;
1010         }
1011 }
1012
1013 static int cpdma_chan_submit_si(struct submit_info *si)
1014 {
1015         struct cpdma_chan               *chan = si->chan;
1016         struct cpdma_ctlr               *ctlr = chan->ctlr;
1017         int                             len = si->len;
1018         struct cpdma_desc __iomem       *desc;
1019         dma_addr_t                      buffer;
1020         u32                             mode;
1021         int                             ret;
1022
1023         if (chan->count >= chan->desc_num)      {
1024                 chan->stats.desc_alloc_fail++;
1025                 return -ENOMEM;
1026         }
1027
1028         desc = cpdma_desc_alloc(ctlr->pool);
1029         if (!desc) {
1030                 chan->stats.desc_alloc_fail++;
1031                 return -ENOMEM;
1032         }
1033
1034         if (len < ctlr->params.min_packet_size) {
1035                 len = ctlr->params.min_packet_size;
1036                 chan->stats.runt_transmit_buff++;
1037         }
1038
1039         buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
1040         ret = dma_mapping_error(ctlr->dev, buffer);
1041         if (ret) {
1042                 cpdma_desc_free(ctlr->pool, desc, 1);
1043                 return -EINVAL;
1044         }
1045
1046         mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
1047         cpdma_desc_to_port(chan, mode, si->directed);
1048
1049         /* Relaxed IO accessors can be used here as there is a read barrier
1050          * at the end of the write sequence.
1051          */
1052         writel_relaxed(0, &desc->hw_next);
1053         writel_relaxed(buffer, &desc->hw_buffer);
1054         writel_relaxed(len, &desc->hw_len);
1055         writel_relaxed(mode | len, &desc->hw_mode);
1056         writel_relaxed((uintptr_t)si->token, &desc->sw_token);
1057         writel_relaxed(buffer, &desc->sw_buffer);
1058         writel_relaxed(len, &desc->sw_len);
1059         desc_read(desc, sw_len);
1060
1061         __cpdma_chan_submit(chan, desc);
1062
1063         if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
1064                 chan_write(chan, rxfree, 1);
1065
1066         chan->count++;
1067         return 0;
1068 }
1069
1070 int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
1071                            int len, int directed)
1072 {
1073         struct submit_info si;
1074         unsigned long flags;
1075         int ret;
1076
1077         si.chan = chan;
1078         si.token = token;
1079         si.data = data;
1080         si.len = len;
1081         si.directed = directed;
1082
1083         spin_lock_irqsave(&chan->lock, flags);
1084         if (chan->state == CPDMA_STATE_TEARDOWN) {
1085                 spin_unlock_irqrestore(&chan->lock, flags);
1086                 return -EINVAL;
1087         }
1088
1089         ret = cpdma_chan_submit_si(&si);
1090         spin_unlock_irqrestore(&chan->lock, flags);
1091         return ret;
1092 }
1093
1094 int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
1095                       int len, int directed)
1096 {
1097         struct submit_info si;
1098         unsigned long flags;
1099         int ret;
1100
1101         si.chan = chan;
1102         si.token = token;
1103         si.data = data;
1104         si.len = len;
1105         si.directed = directed;
1106
1107         spin_lock_irqsave(&chan->lock, flags);
1108         if (chan->state != CPDMA_STATE_ACTIVE) {
1109                 spin_unlock_irqrestore(&chan->lock, flags);
1110                 return -EINVAL;
1111         }
1112
1113         ret = cpdma_chan_submit_si(&si);
1114         spin_unlock_irqrestore(&chan->lock, flags);
1115         return ret;
1116 }
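/* Typical use of the two submit variants above, seen from a client driver
 * (names are illustrative; cpsw, for instance, passes the skb as the token):
 *
 *   ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *
 * queues a buffer on a running channel, while cpdma_chan_idle_submit() lets
 * the caller pre-fill a still-idle channel (e.g. rx buffers) before
 * cpdma_ctlr_start()/cpdma_chan_start() kick it off. The token is handed back
 * unchanged to the channel's cpdma_handler_fn when the buffer completes.
 */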
1117
1118 bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
1119 {
1120         struct cpdma_ctlr       *ctlr = chan->ctlr;
1121         struct cpdma_desc_pool  *pool = ctlr->pool;
1122         bool                    free_tx_desc;
1123         unsigned long           flags;
1124
1125         spin_lock_irqsave(&chan->lock, flags);
1126         free_tx_desc = (chan->count < chan->desc_num) &&
1127                          gen_pool_avail(pool->gen_pool);
1128         spin_unlock_irqrestore(&chan->lock, flags);
1129         return free_tx_desc;
1130 }
1131
1132 static void __cpdma_chan_free(struct cpdma_chan *chan,
1133                               struct cpdma_desc __iomem *desc,
1134                               int outlen, int status)
1135 {
1136         struct cpdma_ctlr               *ctlr = chan->ctlr;
1137         struct cpdma_desc_pool          *pool = ctlr->pool;
1138         dma_addr_t                      buff_dma;
1139         int                             origlen;
1140         uintptr_t                       token;
1141
1142         token      = desc_read(desc, sw_token);
1143         buff_dma   = desc_read(desc, sw_buffer);
1144         origlen    = desc_read(desc, sw_len);
1145
1146         dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
1147         cpdma_desc_free(pool, desc, 1);
1148         (*chan->handler)((void *)token, outlen, status);
1149 }
1150
1151 static int __cpdma_chan_process(struct cpdma_chan *chan)
1152 {
1153         struct cpdma_ctlr               *ctlr = chan->ctlr;
1154         struct cpdma_desc __iomem       *desc;
1155         int                             status, outlen;
1156         int                             cb_status = 0;
1157         struct cpdma_desc_pool          *pool = ctlr->pool;
1158         dma_addr_t                      desc_dma;
1159         unsigned long                   flags;
1160
1161         spin_lock_irqsave(&chan->lock, flags);
1162
1163         desc = chan->head;
1164         if (!desc) {
1165                 chan->stats.empty_dequeue++;
1166                 status = -ENOENT;
1167                 goto unlock_ret;
1168         }
1169         desc_dma = desc_phys(pool, desc);
1170
1171         status  = desc_read(desc, hw_mode);
1172         outlen  = status & 0x7ff;
1173         if (status & CPDMA_DESC_OWNER) {
1174                 chan->stats.busy_dequeue++;
1175                 status = -EBUSY;
1176                 goto unlock_ret;
1177         }
1178
1179         if (status & CPDMA_DESC_PASS_CRC)
1180                 outlen -= CPDMA_DESC_CRC_LEN;
1181
1182         status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
1183                             CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);
1184
1185         chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
1186         chan_write(chan, cp, desc_dma);
1187         chan->count--;
1188         chan->stats.good_dequeue++;
1189
1190         if ((status & CPDMA_DESC_EOQ) && chan->head) {
1191                 chan->stats.requeue++;
1192                 chan_write(chan, hdp, desc_phys(pool, chan->head));
1193         }
1194
1195         spin_unlock_irqrestore(&chan->lock, flags);
1196         if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
1197                 cb_status = -ENOSYS;
1198         else
1199                 cb_status = status;
1200
1201         __cpdma_chan_free(chan, desc, outlen, cb_status);
1202         return status;
1203
1204 unlock_ret:
1205         spin_unlock_irqrestore(&chan->lock, flags);
1206         return status;
1207 }
1208
1209 int cpdma_chan_process(struct cpdma_chan *chan, int quota)
1210 {
1211         int used = 0, ret = 0;
1212
1213         if (chan->state != CPDMA_STATE_ACTIVE)
1214                 return -EINVAL;
1215
1216         while (used < quota) {
1217                 ret = __cpdma_chan_process(chan);
1218                 if (ret < 0)
1219                         break;
1220                 used++;
1221         }
1222         return used;
1223 }
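/* cpdma_chan_process() is the completion-harvesting entry point. A minimal
 * sketch of a caller's NAPI poll driving it (the function and variable names
 * here are illustrative, not part of this file):
 *
 *   static int my_rx_poll(struct napi_struct *napi, int budget)
 *   {
 *           int done = cpdma_chan_process(rx_chan, budget);
 *
 *           if (done < budget) {
 *                   napi_complete(napi);
 *                   cpdma_chan_int_ctrl(rx_chan, true);
 *           }
 *           return done;
 *   }
 *
 * Every processed descriptor invokes the channel handler with the stored
 * token, the completed length and the status bits masked above.
 */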
1224
1225 int cpdma_chan_start(struct cpdma_chan *chan)
1226 {
1227         struct cpdma_ctlr *ctlr = chan->ctlr;
1228         unsigned long flags;
1229         int ret;
1230
1231         spin_lock_irqsave(&ctlr->lock, flags);
1232         ret = cpdma_chan_set_chan_shaper(chan);
1233         spin_unlock_irqrestore(&ctlr->lock, flags);
1234         if (ret)
1235                 return ret;
1236
1237         ret = cpdma_chan_on(chan);
1238         if (ret)
1239                 return ret;
1240
1241         return 0;
1242 }
1243
1244 int cpdma_chan_stop(struct cpdma_chan *chan)
1245 {
1246         struct cpdma_ctlr       *ctlr = chan->ctlr;
1247         struct cpdma_desc_pool  *pool = ctlr->pool;
1248         unsigned long           flags;
1249         int                     ret;
1250         unsigned                timeout;
1251
1252         spin_lock_irqsave(&chan->lock, flags);
1253         if (chan->state == CPDMA_STATE_TEARDOWN) {
1254                 spin_unlock_irqrestore(&chan->lock, flags);
1255                 return -EINVAL;
1256         }
1257
1258         chan->state = CPDMA_STATE_TEARDOWN;
1259         dma_reg_write(ctlr, chan->int_clear, chan->mask);
1260
1261         /* trigger teardown */
1262         dma_reg_write(ctlr, chan->td, chan_linear(chan));
1263
1264         /* wait for teardown complete */
1265         timeout = 100 * 100; /* 100 ms */
1266         while (timeout) {
1267                 u32 cp = chan_read(chan, cp);
1268                 if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
1269                         break;
1270                 udelay(10);
1271                 timeout--;
1272         }
1273         WARN_ON(!timeout);
1274         chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
1275
1276         /* handle completed packets */
1277         spin_unlock_irqrestore(&chan->lock, flags);
1278         do {
1279                 ret = __cpdma_chan_process(chan);
1280                 if (ret < 0)
1281                         break;
1282         } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
1283         spin_lock_irqsave(&chan->lock, flags);
1284
1285         /* remaining packets haven't been tx/rx'ed, clean them up */
1286         while (chan->head) {
1287                 struct cpdma_desc __iomem *desc = chan->head;
1288                 dma_addr_t next_dma;
1289
1290                 next_dma = desc_read(desc, hw_next);
1291                 chan->head = desc_from_phys(pool, next_dma);
1292                 chan->count--;
1293                 chan->stats.teardown_dequeue++;
1294
1295                 /* issue callback without locks held */
1296                 spin_unlock_irqrestore(&chan->lock, flags);
1297                 __cpdma_chan_free(chan, desc, 0, -ENOSYS);
1298                 spin_lock_irqsave(&chan->lock, flags);
1299         }
1300
1301         chan->state = CPDMA_STATE_IDLE;
1302         spin_unlock_irqrestore(&chan->lock, flags);
1303         return 0;
1304 }
1305
1306 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
1307 {
1308         unsigned long flags;
1309
1310         spin_lock_irqsave(&chan->lock, flags);
1311         if (chan->state != CPDMA_STATE_ACTIVE) {
1312                 spin_unlock_irqrestore(&chan->lock, flags);
1313                 return -EINVAL;
1314         }
1315
1316         dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
1317                       chan->mask);
1318         spin_unlock_irqrestore(&chan->lock, flags);
1319
1320         return 0;
1321 }
1322
1323 int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
1324 {
1325         unsigned long flags;
1326         int ret;
1327
1328         spin_lock_irqsave(&ctlr->lock, flags);
1329         ret = _cpdma_control_get(ctlr, control);
1330         spin_unlock_irqrestore(&ctlr->lock, flags);
1331
1332         return ret;
1333 }
1334
1335 int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
1336 {
1337         unsigned long flags;
1338         int ret;
1339
1340         spin_lock_irqsave(&ctlr->lock, flags);
1341         ret = _cpdma_control_set(ctlr, control, value);
1342         spin_unlock_irqrestore(&ctlr->lock, flags);
1343
1344         return ret;
1345 }
1346
1347 int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
1348 {
1349         return ctlr->num_rx_desc;
1350 }
1351
1352 int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
1353 {
1354         return ctlr->num_tx_desc;
1355 }
1356
1357 void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
1358 {
1359         ctlr->num_rx_desc = num_rx_desc;
1360         ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
1361 }