drivers/dma/zx_dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2015 Linaro.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME             "zx-dma"
#define DMA_ALIGN               4
#define DMA_MAX_SIZE            (0x10000 - 512)
#define LLI_BLOCK_SIZE          (4 * PAGE_SIZE)

#define REG_ZX_SRC_ADDR                 0x00
#define REG_ZX_DST_ADDR                 0x04
#define REG_ZX_TX_X_COUNT               0x08
#define REG_ZX_TX_ZY_COUNT              0x0c
#define REG_ZX_SRC_ZY_STEP              0x10
#define REG_ZX_DST_ZY_STEP              0x14
#define REG_ZX_LLI_ADDR                 0x1c
#define REG_ZX_CTRL                     0x20
#define REG_ZX_TC_IRQ                   0x800
#define REG_ZX_SRC_ERR_IRQ              0x804
#define REG_ZX_DST_ERR_IRQ              0x808
#define REG_ZX_CFG_ERR_IRQ              0x80c
#define REG_ZX_TC_IRQ_RAW               0x810
#define REG_ZX_SRC_ERR_IRQ_RAW          0x814
#define REG_ZX_DST_ERR_IRQ_RAW          0x818
#define REG_ZX_CFG_ERR_IRQ_RAW          0x81c
#define REG_ZX_STATUS                   0x820
#define REG_ZX_DMA_GRP_PRIO             0x824
#define REG_ZX_DMA_ARB                  0x828

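/*
 * Bit fields of the per-channel control word written to REG_ZX_CTRL
 * (and carried in the ctr field of each hardware link-list item).
 */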
#define ZX_FORCE_CLOSE                  BIT(31)
#define ZX_DST_BURST_WIDTH(x)           (((x) & 0x7) << 13)
#define ZX_MAX_BURST_LEN                16
#define ZX_SRC_BURST_LEN(x)             (((x) & 0xf) << 9)
#define ZX_SRC_BURST_WIDTH(x)           (((x) & 0x7) << 6)
#define ZX_IRQ_ENABLE_ALL               (3 << 4)
#define ZX_DST_FIFO_MODE                BIT(3)
#define ZX_SRC_FIFO_MODE                BIT(2)
#define ZX_SOFT_REQ                     BIT(1)
#define ZX_CH_ENABLE                    BIT(0)

#define ZX_DMA_BUSWIDTHS \
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
        BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum zx_dma_burst_width {
        ZX_DMA_WIDTH_8BIT       = 0,
        ZX_DMA_WIDTH_16BIT      = 1,
        ZX_DMA_WIDTH_32BIT      = 2,
        ZX_DMA_WIDTH_64BIT      = 3,
};

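/*
 * Hardware link-list item (LLI). The first words mirror the per-channel
 * register block programmed by zx_dma_set_desc(); the trailing reserved
 * words pad the item to the 64-byte channel register stride (see the
 * 0x40 step used for phy bases in zx_dma_probe()).
 */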
struct zx_desc_hw {
        u32 saddr;
        u32 daddr;
        u32 src_x;
        u32 src_zy;
        u32 src_zy_step;
        u32 dst_zy_step;
        u32 reserved1;
        u32 lli;
        u32 ctr;
        u32 reserved[7]; /* pad to the hardware register region size */
} __aligned(32);

struct zx_dma_desc_sw {
        struct virt_dma_desc    vd;
        dma_addr_t              desc_hw_lli;
        size_t                  desc_num;
        size_t                  size;
        struct zx_desc_hw       *desc_hw;
};

struct zx_dma_phy;

struct zx_dma_chan {
        struct dma_slave_config slave_cfg;
        int                     id; /* Request phy chan id */
        u32                     ccfg;
        u32                     cyclic;
        struct virt_dma_chan    vc;
        struct zx_dma_phy       *phy;
        struct list_head        node;
        dma_addr_t              dev_addr;
        enum dma_status         status;
};

struct zx_dma_phy {
        u32                     idx;
        void __iomem            *base;
        struct zx_dma_chan      *vchan;
        struct zx_dma_desc_sw   *ds_run;
        struct zx_dma_desc_sw   *ds_done;
};

struct zx_dma_dev {
        struct dma_device       slave;
        void __iomem            *base;
        spinlock_t              lock; /* lock for ch and phy */
        struct list_head        chan_pending;
        struct zx_dma_phy       *phy;
        struct zx_dma_chan      *chans;
        struct clk              *clk;
        struct dma_pool         *pool;
        u32                     dma_channels;
        u32                     dma_requests;
        int                     irq;
};

#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)

static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
{
        return container_of(chan, struct zx_dma_chan, vc.chan);
}

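/* Force-close a physical channel and ack any interrupt it has latched. */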
static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
{
        u32 val = 0;

        val = readl_relaxed(phy->base + REG_ZX_CTRL);
        val &= ~ZX_CH_ENABLE;
        val |= ZX_FORCE_CLOSE;
        writel_relaxed(val, phy->base + REG_ZX_CTRL);

        val = 0x1 << phy->idx;
        writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
        writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
        writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
        writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
{
        writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
        writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
        writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
        writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
        writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
        writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
        writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
        writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
}

static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
{
        return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
}

static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
{
        return readl_relaxed(d->base + REG_ZX_STATUS);
}

static void zx_dma_init_state(struct zx_dma_dev *d)
{
        /* set same priority */
        writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
        /* clear all irq */
        writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
        writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
        writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
        writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

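/*
 * Program the next issued descriptor, if any, onto the channel's
 * physical channel. Returns -EAGAIN when there is no phy, the phy is
 * still busy, or nothing is issued. Called with the vchan lock held.
 */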
static int zx_dma_start_txd(struct zx_dma_chan *c)
{
        struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        if (!c->phy)
                return -EAGAIN;

        if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
                return -EAGAIN;

        if (vd) {
                struct zx_dma_desc_sw *ds =
                        container_of(vd, struct zx_dma_desc_sw, vd);
                /*
                 * fetch and remove request from vc->desc_issued
                 * so vc->desc_issued only contains descriptors pending
                 */
                list_del(&ds->vd.node);
                c->phy->ds_run = ds;
                c->phy->ds_done = NULL;
                /* start dma */
                zx_dma_set_desc(c->phy, ds->desc_hw);
                return 0;
        }
        c->phy->ds_done = NULL;
        c->phy->ds_run = NULL;
        return -EAGAIN;
}

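/*
 * Channel scheduler: recycle physical channels whose descriptor has
 * completed, bind pending virtual channels to their fixed physical
 * channel (d->phy[c->id]), then start a transfer on each channel that
 * was just bound.
 */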
static void zx_dma_task(struct zx_dma_dev *d)
{
        struct zx_dma_phy *p;
        struct zx_dma_chan *c, *cn;
        unsigned int pch, pch_alloc = 0;
        unsigned long flags;

        /* check new dma request of running channel in vc->desc_issued */
        list_for_each_entry_safe(c, cn, &d->slave.channels,
                                 vc.chan.device_node) {
                spin_lock_irqsave(&c->vc.lock, flags);
                p = c->phy;
                if (p && p->ds_done && zx_dma_start_txd(c)) {
                        /* No current txd associated with this channel */
                        dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
                        /* Mark this channel free */
                        c->phy = NULL;
                        p->vchan = NULL;
                }
                spin_unlock_irqrestore(&c->vc.lock, flags);
        }

        /*
         * check new channel request in d->chan_pending; walk the list
         * once so a busy phy cannot stall the loop forever
         */
        spin_lock_irqsave(&d->lock, flags);
        list_for_each_entry_safe(c, cn, &d->chan_pending, node) {
                p = &d->phy[c->id];
                if (!p->vchan) {
                        /* remove from d->chan_pending */
                        list_del_init(&c->node);
                        pch_alloc |= 1 << c->id;
                        /* Mark this channel allocated */
                        p->vchan = c;
                        c->phy = p;
                } else {
                        dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
                }
        }
        spin_unlock_irqrestore(&d->lock, flags);

        for (pch = 0; pch < d->dma_channels; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;
                        if (c) {
                                spin_lock_irqsave(&c->vc.lock, flags);
                                zx_dma_start_txd(c);
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                }
        }
}

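/*
 * One status register per IRQ source: transfer-complete bits are
 * handled per channel below; source/destination/config errors are
 * only reported and acked.
 */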
static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
{
        struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
        struct zx_dma_phy *p;
        struct zx_dma_chan *c;
        u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
        u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
        u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
        u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
        u32 i, irq_chan = 0, task = 0;

        while (tc) {
                i = __ffs(tc);
                tc &= ~BIT(i);
                p = &d->phy[i];
                c = p->vchan;
                if (c) {
                        spin_lock(&c->vc.lock);
                        if (c->cyclic) {
                                vchan_cyclic_callback(&p->ds_run->vd);
                        } else {
                                vchan_cookie_complete(&p->ds_run->vd);
                                p->ds_done = p->ds_run;
                                task = 1;
                        }
                        spin_unlock(&c->vc.lock);
                        irq_chan |= BIT(i);
                }
        }

        if (serr || derr || cfg)
                dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
                         serr, derr, cfg);

        writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
        writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
        writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
        writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);

        if (task)
                zx_dma_task(d);
        return IRQ_HANDLED;
}

static void zx_dma_free_chan_resources(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_dev *d = to_zx_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
        c->ccfg = 0;
}

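/*
 * Residue reporting: for a descriptor still on the issue queue the
 * residue is its full size; for the running descriptor the remaining
 * bytes are summed by walking the LLI chain from the entry the
 * hardware is currently fetching (REG_ZX_LLI_ADDR) to the end.
 */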
static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        struct dma_tx_state *state)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_phy *p;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        ret = dma_cookie_status(&c->vc.chan, cookie, state);
        if (ret == DMA_COMPLETE || !state)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;
        ret = c->status;

        /*
         * If the cookie is on our issue queue, then the residue is
         * its total size.
         */
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
        } else if (!p || !p->ds_run) {
                bytes = 0;
        } else {
                struct zx_dma_desc_sw *ds = p->ds_run;
                u32 clli = 0, index = 0;

                bytes = 0;
                clli = zx_dma_get_curr_lli(p);
                index = (clli - ds->desc_hw_lli) /
                                sizeof(struct zx_desc_hw) + 1;
                for (; index < ds->desc_num; index++) {
                        bytes += ds->desc_hw[index].src_x;
                        /* end of lli */
                        if (!ds->desc_hw[index].lli)
                                break;
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        dma_set_residue(state, bytes);
        return ret;
}

static void zx_dma_issue_pending(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_dev *d = to_zx_dma(chan->device);
        unsigned long flags;
        int issue = 0;

        spin_lock_irqsave(&c->vc.lock, flags);
        /* add request to vc->desc_issued */
        if (vchan_issue_pending(&c->vc)) {
                spin_lock(&d->lock);
                if (!c->phy && list_empty(&c->node)) {
                        /* if new channel, add chan_pending */
                        list_add_tail(&c->node, &d->chan_pending);
                        issue = 1;
                        dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                }
                spin_unlock(&d->lock);
        } else {
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        if (issue)
                zx_dma_task(d);
}

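/*
 * Fill LLI entry @num and chain it to the following entry; callers
 * clear or loop the lli field of the last entry themselves.
 */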
static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
                             dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
        if ((num + 1) < ds->desc_num)
                ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
                        sizeof(struct zx_desc_hw);
        ds->desc_hw[num].saddr = src;
        ds->desc_hw[num].daddr = dst;
        ds->desc_hw[num].src_x = len;
        ds->desc_hw[num].ctr = ccfg;
}

static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
                                                     struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_desc_sw *ds;
        struct zx_dma_dev *d = to_zx_dma(chan->device);
        int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);

        if (num > lli_limit) {
                dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
                        &c->vc, num, lli_limit);
                return NULL;
        }

        ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
        if (!ds)
                return NULL;

        ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
        if (!ds->desc_hw) {
                dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
                kfree(ds);
                return NULL;
        }
        ds->desc_num = num;
        return ds;
}

static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
{
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                return ffs(width) - 1;
        default:
                return ZX_DMA_WIDTH_32BIT;
        }
}

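/*
 * Derive the per-channel control word (ccfg) from the transfer
 * direction and the cached slave configuration. The burst length is
 * clamped to ZX_MAX_BURST_LEN and encoded as length - 1.
 */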
static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
{
        struct dma_slave_config *cfg = &c->slave_cfg;
        enum zx_dma_burst_width src_width;
        enum zx_dma_burst_width dst_width;
        u32 maxburst = 0;

        switch (dir) {
        case DMA_MEM_TO_MEM:
                c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
                        | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
                        | ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
                        | ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
                break;
        case DMA_MEM_TO_DEV:
                c->dev_addr = cfg->dst_addr;
                /* dst len is calculated from src width, len and dst width.
                 * We need to make sure the dst len does not exceed MAX LEN.
                 * A trailing single transaction that does not fill a full
                 * burst also requires identical src/dst data widths.
                 */
                dst_width = zx_dma_burst_width(cfg->dst_addr_width);
                maxburst = cfg->dst_maxburst;
                maxburst = maxburst < ZX_MAX_BURST_LEN ?
                                maxburst : ZX_MAX_BURST_LEN;
                c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
                        | ZX_SRC_BURST_LEN(maxburst - 1)
                        | ZX_SRC_BURST_WIDTH(dst_width)
                        | ZX_DST_BURST_WIDTH(dst_width);
                break;
        case DMA_DEV_TO_MEM:
                c->dev_addr = cfg->src_addr;
                src_width = zx_dma_burst_width(cfg->src_addr_width);
                maxburst = cfg->src_maxburst;
                maxburst = maxburst < ZX_MAX_BURST_LEN ?
                                maxburst : ZX_MAX_BURST_LEN;
                c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
                        | ZX_SRC_BURST_LEN(maxburst - 1)
                        | ZX_SRC_BURST_WIDTH(src_width)
                        | ZX_DST_BURST_WIDTH(src_width);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_desc_sw *ds;
        size_t copy = 0;
        int num = 0;

        if (!len)
                return NULL;

        if (zx_pre_config(c, DMA_MEM_TO_MEM))
                return NULL;

        num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

        ds = zx_alloc_desc_resource(num, chan);
        if (!ds)
                return NULL;

        ds->size = len;
        num = 0;

        do {
                copy = min_t(size_t, len, DMA_MAX_SIZE);
                zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

                src += copy;
                dst += copy;
                len -= copy;
        } while (len);

        c->cyclic = 0;
        ds->desc_hw[num - 1].lli = 0;   /* end of link */
        ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_desc_sw *ds;
        size_t len, avail, total = 0;
        struct scatterlist *sg;
        dma_addr_t addr, src = 0, dst = 0;
        int num = sglen, i;

        if (!sgl)
                return NULL;

        if (zx_pre_config(c, dir))
                return NULL;

        for_each_sg(sgl, sg, sglen, i) {
                avail = sg_dma_len(sg);
                if (avail > DMA_MAX_SIZE)
                        num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
        }

        ds = zx_alloc_desc_resource(num, chan);
        if (!ds)
                return NULL;

        c->cyclic = 0;
        num = 0;
        for_each_sg(sgl, sg, sglen, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);
                total += avail;

                do {
                        len = min_t(size_t, avail, DMA_MAX_SIZE);

                        if (dir == DMA_MEM_TO_DEV) {
                                src = addr;
                                dst = c->dev_addr;
                        } else if (dir == DMA_DEV_TO_MEM) {
                                src = c->dev_addr;
                                dst = addr;
                        }

                        zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

                        addr += len;
                        avail -= len;
                } while (avail);
        }

        ds->desc_hw[num - 1].lli = 0;   /* end of link */
        ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
        ds->size = total;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

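/*
 * Cyclic transfers reuse the LLI machinery: every period raises a
 * terminal-count interrupt (ccfg | ZX_IRQ_ENABLE_ALL) and the last
 * entry's lli points back at the first, so the chain never ends.
 */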
static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction dir,
                unsigned long flags)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_desc_sw *ds;
        dma_addr_t src = 0, dst = 0;
        int num_periods = buf_len / period_len;
        int buf = 0, num = 0;

        if (period_len > DMA_MAX_SIZE) {
                dev_err(chan->device->dev, "maximum period size exceeded\n");
                return NULL;
        }

        if (zx_pre_config(c, dir))
                return NULL;

        ds = zx_alloc_desc_resource(num_periods, chan);
        if (!ds)
                return NULL;
        c->cyclic = 1;

        while (buf < buf_len) {
                if (dir == DMA_MEM_TO_DEV) {
                        src = dma_addr;
                        dst = c->dev_addr;
                } else if (dir == DMA_DEV_TO_MEM) {
                        src = c->dev_addr;
                        dst = dma_addr;
                }
                zx_dma_fill_desc(ds, dst, src, period_len, num++,
                                 c->ccfg | ZX_IRQ_ENABLE_ALL);
                dma_addr += period_len;
                buf += period_len;
        }

        ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
        ds->size = buf_len;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static int zx_dma_config(struct dma_chan *chan,
                         struct dma_slave_config *cfg)
{
        struct zx_dma_chan *c = to_zx_chan(chan);

        if (!cfg)
                return -EINVAL;

        memcpy(&c->slave_cfg, cfg, sizeof(*cfg));

        return 0;
}

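/*
 * For reference, a minimal client-side sketch of driving a slave
 * channel through the generic dmaengine API. The names dev, fifo_phys,
 * buf_dma and len are hypothetical placeholders, not part of this
 * driver:
 *
 *      struct dma_chan *chan = dma_request_chan(dev, "tx");
 *      struct dma_slave_config cfg = {
 *              .dst_addr       = fifo_phys,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst   = 8,
 *      };
 *      struct dma_async_tx_descriptor *tx;
 *
 *      dmaengine_slave_config(chan, &cfg);
 *      tx = dmaengine_prep_slave_single(chan, buf_dma, len,
 *                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *      dmaengine_submit(tx);
 *      dma_async_issue_pending(chan);
 *
 * dmaengine_slave_config() lands in zx_dma_config() above, the prep
 * call in zx_dma_prep_slave_sg(), and dma_async_issue_pending() in
 * zx_dma_issue_pending(). Error handling is omitted for brevity.
 */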
static int zx_dma_terminate_all(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_dev *d = to_zx_dma(chan->device);
        struct zx_dma_phy *p = c->phy;
        unsigned long flags;
        LIST_HEAD(head);

        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /* Clear the tx descriptor lists */
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        if (p) {
                /* vchan is assigned to a pchan - stop the channel */
                zx_dma_terminate_chan(p, d);
                c->phy = NULL;
                p->vchan = NULL;
                p->ds_run = NULL;
                p->ds_done = NULL;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int zx_dma_transfer_pause(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        u32 val = 0;

        val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
        val &= ~ZX_CH_ENABLE;
        writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

        return 0;
}

static int zx_dma_transfer_resume(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        u32 val = 0;

        val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
        val |= ZX_CH_ENABLE;
        writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

        return 0;
}

static void zx_dma_free_desc(struct virt_dma_desc *vd)
{
        struct zx_dma_desc_sw *ds =
                container_of(vd, struct zx_dma_desc_sw, vd);
        struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);

        dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
        kfree(ds);
}

static const struct of_device_id zx6702_dma_dt_ids[] = {
        { .compatible = "zte,zx296702-dma", },
        {}
};
MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);

static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
                                               struct of_dma *ofdma)
{
        struct zx_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];
        struct dma_chan *chan;
        struct zx_dma_chan *c;

        if (request >= d->dma_requests)
                return NULL;

        chan = dma_get_any_slave_channel(&d->slave);
        if (!chan) {
                dev_err(d->slave.dev, "failed to get a channel in %s\n",
                        __func__);
                return NULL;
        }
        c = to_zx_chan(chan);
        c->id = request;
        dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
                 c->id, &c->vc);
        return chan;
}

static int zx_dma_probe(struct platform_device *op)
{
        struct zx_dma_dev *d;
        int i, ret = 0;

        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->base = devm_platform_ioremap_resource(op, 0);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);

        of_property_read_u32(op->dev.of_node,
                             "dma-channels", &d->dma_channels);
        of_property_read_u32(op->dev.of_node,
                             "dma-requests", &d->dma_requests);
        if (!d->dma_requests || !d->dma_channels)
                return -EINVAL;

        d->clk = devm_clk_get(&op->dev, NULL);
        if (IS_ERR(d->clk)) {
                dev_err(&op->dev, "no dma clk\n");
                return PTR_ERR(d->clk);
        }

        d->irq = platform_get_irq(op, 0);
        ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
                               0, DRIVER_NAME, d);
        if (ret)
                return ret;

        /* A DMA memory pool for LLIs, align on 32-byte boundary */
        d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
                        LLI_BLOCK_SIZE, 32, 0);
        if (!d->pool)
                return -ENOMEM;

        /* init phy channel */
        d->phy = devm_kcalloc(&op->dev,
                d->dma_channels, sizeof(struct zx_dma_phy), GFP_KERNEL);
        if (!d->phy)
                return -ENOMEM;

        for (i = 0; i < d->dma_channels; i++) {
                struct zx_dma_phy *p = &d->phy[i];

                p->idx = i;
                p->base = d->base + i * 0x40;
        }

        INIT_LIST_HEAD(&d->slave.channels);
        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
        dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
        d->slave.dev = &op->dev;
        d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
        d->slave.device_tx_status = zx_dma_tx_status;
        d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
        d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
        d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
        d->slave.device_issue_pending = zx_dma_issue_pending;
        d->slave.device_config = zx_dma_config;
        d->slave.device_terminate_all = zx_dma_terminate_all;
        d->slave.device_pause = zx_dma_transfer_pause;
        d->slave.device_resume = zx_dma_transfer_resume;
        d->slave.copy_align = DMA_ALIGN;
        d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
        d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
        d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
                        | BIT(DMA_DEV_TO_MEM);
        d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

        /* init virtual channel */
        d->chans = devm_kcalloc(&op->dev,
                d->dma_requests, sizeof(struct zx_dma_chan), GFP_KERNEL);
        if (!d->chans)
                return -ENOMEM;

        for (i = 0; i < d->dma_requests; i++) {
                struct zx_dma_chan *c = &d->chans[i];

                c->status = DMA_IN_PROGRESS;
                INIT_LIST_HEAD(&c->node);
                c->vc.desc_free = zx_dma_free_desc;
                vchan_init(&c->vc, &d->slave);
        }

        /* Enable clock before accessing registers */
        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
                goto zx_dma_out;
        }

        zx_dma_init_state(d);

        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);
        platform_set_drvdata(op, d);

        ret = dma_async_device_register(&d->slave);
        if (ret)
                goto clk_dis;

        ret = of_dma_controller_register(op->dev.of_node,
                                         zx_of_dma_simple_xlate, d);
        if (ret)
                goto of_dma_register_fail;

        dev_info(&op->dev, "initialized\n");
        return 0;

of_dma_register_fail:
        dma_async_device_unregister(&d->slave);
clk_dis:
        clk_disable_unprepare(d->clk);
zx_dma_out:
        return ret;
}

static int zx_dma_remove(struct platform_device *op)
{
        struct zx_dma_chan *c, *cn;
        struct zx_dma_dev *d = platform_get_drvdata(op);

        /* explicitly free the irq */
        devm_free_irq(&op->dev, d->irq, d);

        dma_async_device_unregister(&d->slave);
        of_dma_controller_free(op->dev.of_node);

        list_for_each_entry_safe(c, cn, &d->slave.channels,
                                 vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
        }
        clk_disable_unprepare(d->clk);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int zx_dma_suspend_dev(struct device *dev)
{
        struct zx_dma_dev *d = dev_get_drvdata(dev);
        u32 stat = 0;

        stat = zx_dma_get_chan_stat(d);
        if (stat) {
                dev_warn(d->slave.dev,
                         "chan stat 0x%x: still running, refusing to suspend\n",
                         stat);
                return -EBUSY;
        }
        clk_disable_unprepare(d->clk);
        return 0;
}

static int zx_dma_resume_dev(struct device *dev)
{
        struct zx_dma_dev *d = dev_get_drvdata(dev);
        int ret = 0;

        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }
        zx_dma_init_state(d);
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);

static struct platform_driver zx_pdma_driver = {
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = &zx_dma_pmops,
                .of_match_table = zx6702_dma_dt_ids,
        },
        .probe          = zx_dma_probe,
        .remove         = zx_dma_remove,
};

module_platform_driver(zx_pdma_driver);

MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
MODULE_LICENSE("GPL v2");