dmaengine: dma-jz4780: Separate chan/ctrl registers
[linux-2.6-microblaze.git] / drivers / dma / dma-jz4780.c
1 /*
2  * Ingenic JZ4780 DMA controller
3  *
4  * Copyright (c) 2015 Imagination Technologies
5  * Author: Alex Smith <alex@alex-smith.me.uk>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU General Public License as published by the
9  * Free Software Foundation;  either version 2 of the  License, or (at your
10  * option) any later version.
11  */
12
13 #include <linux/clk.h>
14 #include <linux/dmapool.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_device.h>
20 #include <linux/of_dma.h>
21 #include <linux/platform_device.h>
22 #include <linux/slab.h>
23
24 #include "dmaengine.h"
25 #include "virt-dma.h"
26
27 /* Global registers. */
28 #define JZ_DMA_REG_DMAC         0x00
29 #define JZ_DMA_REG_DIRQP        0x04
30 #define JZ_DMA_REG_DDR          0x08
31 #define JZ_DMA_REG_DDRS         0x0c
32 #define JZ_DMA_REG_DMACP        0x1c
33 #define JZ_DMA_REG_DSIRQP       0x20
34 #define JZ_DMA_REG_DSIRQM       0x24
35 #define JZ_DMA_REG_DCIRQP       0x28
36 #define JZ_DMA_REG_DCIRQM       0x2c
37
38 /* Per-channel registers. */
39 #define JZ_DMA_REG_CHAN(n)      (n * 0x20)
40 #define JZ_DMA_REG_DSA          0x00
41 #define JZ_DMA_REG_DTA          0x04
42 #define JZ_DMA_REG_DTC          0x08
43 #define JZ_DMA_REG_DRT          0x0c
44 #define JZ_DMA_REG_DCS          0x10
45 #define JZ_DMA_REG_DCM          0x14
46 #define JZ_DMA_REG_DDA          0x18
47 #define JZ_DMA_REG_DSD          0x1c
48
49 #define JZ_DMA_DMAC_DMAE        BIT(0)
50 #define JZ_DMA_DMAC_AR          BIT(2)
51 #define JZ_DMA_DMAC_HLT         BIT(3)
52 #define JZ_DMA_DMAC_FMSC        BIT(31)
53
54 #define JZ_DMA_DRT_AUTO         0x8
55
56 #define JZ_DMA_DCS_CTE          BIT(0)
57 #define JZ_DMA_DCS_HLT          BIT(2)
58 #define JZ_DMA_DCS_TT           BIT(3)
59 #define JZ_DMA_DCS_AR           BIT(4)
60 #define JZ_DMA_DCS_DES8         BIT(30)
61
62 #define JZ_DMA_DCM_LINK         BIT(0)
63 #define JZ_DMA_DCM_TIE          BIT(1)
64 #define JZ_DMA_DCM_STDE         BIT(2)
65 #define JZ_DMA_DCM_TSZ_SHIFT    8
66 #define JZ_DMA_DCM_TSZ_MASK     (0x7 << JZ_DMA_DCM_TSZ_SHIFT)
67 #define JZ_DMA_DCM_DP_SHIFT     12
68 #define JZ_DMA_DCM_SP_SHIFT     14
69 #define JZ_DMA_DCM_DAI          BIT(22)
70 #define JZ_DMA_DCM_SAI          BIT(23)
71
72 #define JZ_DMA_SIZE_4_BYTE      0x0
73 #define JZ_DMA_SIZE_1_BYTE      0x1
74 #define JZ_DMA_SIZE_2_BYTE      0x2
75 #define JZ_DMA_SIZE_16_BYTE     0x3
76 #define JZ_DMA_SIZE_32_BYTE     0x4
77 #define JZ_DMA_SIZE_64_BYTE     0x5
78 #define JZ_DMA_SIZE_128_BYTE    0x6
79
80 #define JZ_DMA_WIDTH_32_BIT     0x0
81 #define JZ_DMA_WIDTH_8_BIT      0x1
82 #define JZ_DMA_WIDTH_16_BIT     0x2
83
84 #define JZ_DMA_BUSWIDTHS        (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)  | \
85                                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
86                                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
87
88 #define JZ4780_DMA_CTRL_OFFSET  0x1000
89
90 /**
91  * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
92  * @dcm: value for the DCM (channel command) register
93  * @dsa: source address
94  * @dta: target address
95  * @dtc: transfer count (number of blocks of the transfer size specified in DCM
96  * to transfer) in the low 24 bits, offset of the next descriptor from the
97  * descriptor base address in the upper 8 bits.
98  * @sd: target/source stride difference (in stride transfer mode).
99  * @drt: request type
100  */
101 struct jz4780_dma_hwdesc {
102         uint32_t dcm;
103         uint32_t dsa;
104         uint32_t dta;
105         uint32_t dtc;
106         uint32_t sd;
107         uint32_t drt;
108         uint32_t reserved[2];
109 };
110
111 /* Size of allocations for hardware descriptor blocks. */
112 #define JZ_DMA_DESC_BLOCK_SIZE  PAGE_SIZE
113 #define JZ_DMA_MAX_DESC         \
114         (JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))
115
116 struct jz4780_dma_desc {
117         struct virt_dma_desc vdesc;
118
119         struct jz4780_dma_hwdesc *desc;
120         dma_addr_t desc_phys;
121         unsigned int count;
122         enum dma_transaction_type type;
123         uint32_t status;
124 };
125
126 struct jz4780_dma_chan {
127         struct virt_dma_chan vchan;
128         unsigned int id;
129         struct dma_pool *desc_pool;
130
131         uint32_t transfer_type;
132         uint32_t transfer_shift;
133         struct dma_slave_config config;
134
135         struct jz4780_dma_desc *desc;
136         unsigned int curr_hwdesc;
137 };
138
139 struct jz4780_dma_soc_data {
140         unsigned int nb_channels;
141 };
142
143 struct jz4780_dma_dev {
144         struct dma_device dma_device;
145         void __iomem *chn_base;
146         void __iomem *ctrl_base;
147         struct clk *clk;
148         unsigned int irq;
149         const struct jz4780_dma_soc_data *soc_data;
150
151         uint32_t chan_reserved;
152         struct jz4780_dma_chan chan[];
153 };
154
155 struct jz4780_dma_filter_data {
156         struct device_node *of_node;
157         uint32_t transfer_type;
158         int channel;
159 };
160
161 static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
162 {
163         return container_of(chan, struct jz4780_dma_chan, vchan.chan);
164 }
165
166 static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
167         struct virt_dma_desc *vdesc)
168 {
169         return container_of(vdesc, struct jz4780_dma_desc, vdesc);
170 }
171
172 static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
173         struct jz4780_dma_chan *jzchan)
174 {
175         return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
176                             dma_device);
177 }
178
179 static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
180         unsigned int chn, unsigned int reg)
181 {
182         return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
183 }
184
185 static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
186         unsigned int chn, unsigned int reg, uint32_t val)
187 {
188         writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
189 }
190
191 static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
192         unsigned int reg)
193 {
194         return readl(jzdma->ctrl_base + reg);
195 }
196
197 static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
198         unsigned int reg, uint32_t val)
199 {
200         writel(val, jzdma->ctrl_base + reg);
201 }
202
203 static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
204         struct jz4780_dma_chan *jzchan, unsigned int count,
205         enum dma_transaction_type type)
206 {
207         struct jz4780_dma_desc *desc;
208
209         if (count > JZ_DMA_MAX_DESC)
210                 return NULL;
211
212         desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
213         if (!desc)
214                 return NULL;
215
216         desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
217                                     &desc->desc_phys);
218         if (!desc->desc) {
219                 kfree(desc);
220                 return NULL;
221         }
222
223         desc->count = count;
224         desc->type = type;
225         return desc;
226 }
227
228 static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
229 {
230         struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
231         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);
232
233         dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
234         kfree(desc);
235 }
236
237 static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
238 {
239         int ord = ffs(val) - 1;
240
241         /*
242          * 8 byte transfer sizes unsupported so fall back on 4. If it's larger
243          * than the maximum, just limit it. It is perfectly safe to fall back
244          * in this way since we won't exceed the maximum burst size supported
245          * by the device, the only effect is reduced efficiency. This is better
246          * than refusing to perform the request at all.
247          */
248         if (ord == 3)
249                 ord = 2;
250         else if (ord > 7)
251                 ord = 7;
252
253         *shift = ord;
254
255         switch (ord) {
256         case 0:
257                 return JZ_DMA_SIZE_1_BYTE;
258         case 1:
259                 return JZ_DMA_SIZE_2_BYTE;
260         case 2:
261                 return JZ_DMA_SIZE_4_BYTE;
262         case 4:
263                 return JZ_DMA_SIZE_16_BYTE;
264         case 5:
265                 return JZ_DMA_SIZE_32_BYTE;
266         case 6:
267                 return JZ_DMA_SIZE_64_BYTE;
268         default:
269                 return JZ_DMA_SIZE_128_BYTE;
270         }
271 }
272
273 static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
274         struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
275         enum dma_transfer_direction direction)
276 {
277         struct dma_slave_config *config = &jzchan->config;
278         uint32_t width, maxburst, tsz;
279
280         if (direction == DMA_MEM_TO_DEV) {
281                 desc->dcm = JZ_DMA_DCM_SAI;
282                 desc->dsa = addr;
283                 desc->dta = config->dst_addr;
284                 desc->drt = jzchan->transfer_type;
285
286                 width = config->dst_addr_width;
287                 maxburst = config->dst_maxburst;
288         } else {
289                 desc->dcm = JZ_DMA_DCM_DAI;
290                 desc->dsa = config->src_addr;
291                 desc->dta = addr;
292                 desc->drt = jzchan->transfer_type;
293
294                 width = config->src_addr_width;
295                 maxburst = config->src_maxburst;
296         }
297
298         /*
299          * This calculates the maximum transfer size that can be used with the
300          * given address, length, width and maximum burst size. The address
301          * must be aligned to the transfer size, the total length must be
302          * divisible by the transfer size, and we must not use more than the
303          * maximum burst specified by the user.
304          */
305         tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
306                                        &jzchan->transfer_shift);
307
308         switch (width) {
309         case DMA_SLAVE_BUSWIDTH_1_BYTE:
310         case DMA_SLAVE_BUSWIDTH_2_BYTES:
311                 break;
312         case DMA_SLAVE_BUSWIDTH_4_BYTES:
313                 width = JZ_DMA_WIDTH_32_BIT;
314                 break;
315         default:
316                 return -EINVAL;
317         }
318
319         desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
320         desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
321         desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;
322
323         desc->dtc = len >> jzchan->transfer_shift;
324         return 0;
325 }
326
327 static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
328         struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
329         enum dma_transfer_direction direction, unsigned long flags,
330         void *context)
331 {
332         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
333         struct jz4780_dma_desc *desc;
334         unsigned int i;
335         int err;
336
337         desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
338         if (!desc)
339                 return NULL;
340
341         for (i = 0; i < sg_len; i++) {
342                 err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
343                                               sg_dma_address(&sgl[i]),
344                                               sg_dma_len(&sgl[i]),
345                                               direction);
346                 if (err < 0) {
347                         jz4780_dma_desc_free(&jzchan->desc->vdesc);
348                         return NULL;
349                 }
350
351                 desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
352
353                 if (i != (sg_len - 1)) {
354                         /* Automatically proceeed to the next descriptor. */
355                         desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
356
357                         /*
358                          * The upper 8 bits of the DTC field in the descriptor
359                          * must be set to (offset from descriptor base of next
360                          * descriptor >> 4).
361                          */
362                         desc->desc[i].dtc |=
363                                 (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
364                 }
365         }
366
367         return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
368 }
369
370 static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
371         struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
372         size_t period_len, enum dma_transfer_direction direction,
373         unsigned long flags)
374 {
375         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
376         struct jz4780_dma_desc *desc;
377         unsigned int periods, i;
378         int err;
379
380         if (buf_len % period_len)
381                 return NULL;
382
383         periods = buf_len / period_len;
384
385         desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
386         if (!desc)
387                 return NULL;
388
389         for (i = 0; i < periods; i++) {
390                 err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
391                                               period_len, direction);
392                 if (err < 0) {
393                         jz4780_dma_desc_free(&jzchan->desc->vdesc);
394                         return NULL;
395                 }
396
397                 buf_addr += period_len;
398
399                 /*
400                  * Set the link bit to indicate that the controller should
401                  * automatically proceed to the next descriptor. In
402                  * jz4780_dma_begin(), this will be cleared if we need to issue
403                  * an interrupt after each period.
404                  */
405                 desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;
406
407                 /*
408                  * The upper 8 bits of the DTC field in the descriptor must be
409                  * set to (offset from descriptor base of next descriptor >> 4).
410                  * If this is the last descriptor, link it back to the first,
411                  * i.e. leave offset set to 0, otherwise point to the next one.
412                  */
413                 if (i != (periods - 1)) {
414                         desc->desc[i].dtc |=
415                                 (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
416                 }
417         }
418
419         return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
420 }
421
422 static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
423         struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
424         size_t len, unsigned long flags)
425 {
426         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
427         struct jz4780_dma_desc *desc;
428         uint32_t tsz;
429
430         desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
431         if (!desc)
432                 return NULL;
433
434         tsz = jz4780_dma_transfer_size(dest | src | len,
435                                        &jzchan->transfer_shift);
436
437         desc->desc[0].dsa = src;
438         desc->desc[0].dta = dest;
439         desc->desc[0].drt = JZ_DMA_DRT_AUTO;
440         desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
441                             tsz << JZ_DMA_DCM_TSZ_SHIFT |
442                             JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
443                             JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
444         desc->desc[0].dtc = len >> jzchan->transfer_shift;
445
446         return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
447 }
448
449 static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
450 {
451         struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
452         struct virt_dma_desc *vdesc;
453         unsigned int i;
454         dma_addr_t desc_phys;
455
456         if (!jzchan->desc) {
457                 vdesc = vchan_next_desc(&jzchan->vchan);
458                 if (!vdesc)
459                         return;
460
461                 list_del(&vdesc->node);
462
463                 jzchan->desc = to_jz4780_dma_desc(vdesc);
464                 jzchan->curr_hwdesc = 0;
465
466                 if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
467                         /*
468                          * The DMA controller doesn't support triggering an
469                          * interrupt after processing each descriptor, only
470                          * after processing an entire terminated list of
471                          * descriptors. For a cyclic DMA setup the list of
472                          * descriptors is not terminated so we can never get an
473                          * interrupt.
474                          *
475                          * If the user requested a callback for a cyclic DMA
476                          * setup then we workaround this hardware limitation
477                          * here by degrading to a set of unlinked descriptors
478                          * which we will submit in sequence in response to the
479                          * completion of processing the previous descriptor.
480                          */
481                         for (i = 0; i < jzchan->desc->count; i++)
482                                 jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
483                 }
484         } else {
485                 /*
486                  * There is an existing transfer, therefore this must be one
487                  * for which we unlinked the descriptors above. Advance to the
488                  * next one in the list.
489                  */
490                 jzchan->curr_hwdesc =
491                         (jzchan->curr_hwdesc + 1) % jzchan->desc->count;
492         }
493
494         /* Use 8-word descriptors. */
495         jz4780_dma_chn_writel(jzdma, jzchan->id,
496                               JZ_DMA_REG_DCS, JZ_DMA_DCS_DES8);
497
498         /* Write descriptor address and initiate descriptor fetch. */
499         desc_phys = jzchan->desc->desc_phys +
500                     (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
501         jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
502         jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
503
504         /* Enable the channel. */
505         jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
506                               JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
507 }
508
509 static void jz4780_dma_issue_pending(struct dma_chan *chan)
510 {
511         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
512         unsigned long flags;
513
514         spin_lock_irqsave(&jzchan->vchan.lock, flags);
515
516         if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
517                 jz4780_dma_begin(jzchan);
518
519         spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
520 }
521
522 static int jz4780_dma_terminate_all(struct dma_chan *chan)
523 {
524         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
525         struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
526         unsigned long flags;
527         LIST_HEAD(head);
528
529         spin_lock_irqsave(&jzchan->vchan.lock, flags);
530
531         /* Clear the DMA status and stop the transfer. */
532         jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
533         if (jzchan->desc) {
534                 vchan_terminate_vdesc(&jzchan->desc->vdesc);
535                 jzchan->desc = NULL;
536         }
537
538         vchan_get_all_descriptors(&jzchan->vchan, &head);
539
540         spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
541
542         vchan_dma_desc_free_list(&jzchan->vchan, &head);
543         return 0;
544 }
545
546 static void jz4780_dma_synchronize(struct dma_chan *chan)
547 {
548         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
549
550         vchan_synchronize(&jzchan->vchan);
551 }
552
553 static int jz4780_dma_config(struct dma_chan *chan,
554         struct dma_slave_config *config)
555 {
556         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
557
558         if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
559            || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
560                 return -EINVAL;
561
562         /* Copy the reset of the slave configuration, it is used later. */
563         memcpy(&jzchan->config, config, sizeof(jzchan->config));
564
565         return 0;
566 }
567
568 static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
569         struct jz4780_dma_desc *desc, unsigned int next_sg)
570 {
571         struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
572         unsigned int residue, count;
573         unsigned int i;
574
575         residue = 0;
576
577         for (i = next_sg; i < desc->count; i++)
578                 residue += desc->desc[i].dtc << jzchan->transfer_shift;
579
580         if (next_sg != 0) {
581                 count = jz4780_dma_chn_readl(jzdma, jzchan->id,
582                                          JZ_DMA_REG_DTC);
583                 residue += count << jzchan->transfer_shift;
584         }
585
586         return residue;
587 }
588
589 static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
590         dma_cookie_t cookie, struct dma_tx_state *txstate)
591 {
592         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
593         struct virt_dma_desc *vdesc;
594         enum dma_status status;
595         unsigned long flags;
596
597         status = dma_cookie_status(chan, cookie, txstate);
598         if ((status == DMA_COMPLETE) || (txstate == NULL))
599                 return status;
600
601         spin_lock_irqsave(&jzchan->vchan.lock, flags);
602
603         vdesc = vchan_find_desc(&jzchan->vchan, cookie);
604         if (vdesc) {
605                 /* On the issued list, so hasn't been processed yet */
606                 txstate->residue = jz4780_dma_desc_residue(jzchan,
607                                         to_jz4780_dma_desc(vdesc), 0);
608         } else if (cookie == jzchan->desc->vdesc.tx.cookie) {
609                 txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
610                           (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
611         } else
612                 txstate->residue = 0;
613
614         if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
615             && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
616                 status = DMA_ERROR;
617
618         spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
619         return status;
620 }
621
622 static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
623         struct jz4780_dma_chan *jzchan)
624 {
625         uint32_t dcs;
626
627         spin_lock(&jzchan->vchan.lock);
628
629         dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
630         jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
631
632         if (dcs & JZ_DMA_DCS_AR) {
633                 dev_warn(&jzchan->vchan.chan.dev->device,
634                          "address error (DCS=0x%x)\n", dcs);
635         }
636
637         if (dcs & JZ_DMA_DCS_HLT) {
638                 dev_warn(&jzchan->vchan.chan.dev->device,
639                          "channel halt (DCS=0x%x)\n", dcs);
640         }
641
642         if (jzchan->desc) {
643                 jzchan->desc->status = dcs;
644
645                 if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
646                         if (jzchan->desc->type == DMA_CYCLIC) {
647                                 vchan_cyclic_callback(&jzchan->desc->vdesc);
648                         } else {
649                                 vchan_cookie_complete(&jzchan->desc->vdesc);
650                                 jzchan->desc = NULL;
651                         }
652
653                         jz4780_dma_begin(jzchan);
654                 }
655         } else {
656                 dev_err(&jzchan->vchan.chan.dev->device,
657                         "channel IRQ with no active transfer\n");
658         }
659
660         spin_unlock(&jzchan->vchan.lock);
661 }
662
663 static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
664 {
665         struct jz4780_dma_dev *jzdma = data;
666         uint32_t pending, dmac;
667         int i;
668
669         pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
670
671         for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
672                 if (!(pending & (1<<i)))
673                         continue;
674
675                 jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
676         }
677
678         /* Clear halt and address error status of all channels. */
679         dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
680         dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
681         jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
682
683         /* Clear interrupt pending status. */
684         jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
685
686         return IRQ_HANDLED;
687 }
688
689 static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
690 {
691         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
692
693         jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
694                                             chan->device->dev,
695                                             JZ_DMA_DESC_BLOCK_SIZE,
696                                             PAGE_SIZE, 0);
697         if (!jzchan->desc_pool) {
698                 dev_err(&chan->dev->device,
699                         "failed to allocate descriptor pool\n");
700                 return -ENOMEM;
701         }
702
703         return 0;
704 }
705
706 static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
707 {
708         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
709
710         vchan_free_chan_resources(&jzchan->vchan);
711         dma_pool_destroy(jzchan->desc_pool);
712         jzchan->desc_pool = NULL;
713 }
714
715 static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
716 {
717         struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
718         struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
719         struct jz4780_dma_filter_data *data = param;
720
721         if (jzdma->dma_device.dev->of_node != data->of_node)
722                 return false;
723
724         if (data->channel > -1) {
725                 if (data->channel != jzchan->id)
726                         return false;
727         } else if (jzdma->chan_reserved & BIT(jzchan->id)) {
728                 return false;
729         }
730
731         jzchan->transfer_type = data->transfer_type;
732
733         return true;
734 }
735
736 static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
737         struct of_dma *ofdma)
738 {
739         struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
740         dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
741         struct jz4780_dma_filter_data data;
742
743         if (dma_spec->args_count != 2)
744                 return NULL;
745
746         data.of_node = ofdma->of_node;
747         data.transfer_type = dma_spec->args[0];
748         data.channel = dma_spec->args[1];
749
750         if (data.channel > -1) {
751                 if (data.channel >= jzdma->soc_data->nb_channels) {
752                         dev_err(jzdma->dma_device.dev,
753                                 "device requested non-existent channel %u\n",
754                                 data.channel);
755                         return NULL;
756                 }
757
758                 /* Can only select a channel marked as reserved. */
759                 if (!(jzdma->chan_reserved & BIT(data.channel))) {
760                         dev_err(jzdma->dma_device.dev,
761                                 "device requested unreserved channel %u\n",
762                                 data.channel);
763                         return NULL;
764                 }
765
766                 jzdma->chan[data.channel].transfer_type = data.transfer_type;
767
768                 return dma_get_slave_channel(
769                         &jzdma->chan[data.channel].vchan.chan);
770         } else {
771                 return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
772         }
773 }
774
775 static int jz4780_dma_probe(struct platform_device *pdev)
776 {
777         struct device *dev = &pdev->dev;
778         const struct jz4780_dma_soc_data *soc_data;
779         struct jz4780_dma_dev *jzdma;
780         struct jz4780_dma_chan *jzchan;
781         struct dma_device *dd;
782         struct resource *res;
783         int i, ret;
784
785         if (!dev->of_node) {
786                 dev_err(dev, "This driver must be probed from devicetree\n");
787                 return -EINVAL;
788         }
789
790         soc_data = device_get_match_data(dev);
791         if (!soc_data)
792                 return -EINVAL;
793
794         jzdma = devm_kzalloc(dev, sizeof(*jzdma)
795                                 + sizeof(*jzdma->chan) * soc_data->nb_channels,
796                                 GFP_KERNEL);
797         if (!jzdma)
798                 return -ENOMEM;
799
800         jzdma->soc_data = soc_data;
801         platform_set_drvdata(pdev, jzdma);
802
803         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
804         if (!res) {
805                 dev_err(dev, "failed to get I/O memory\n");
806                 return -EINVAL;
807         }
808
809         jzdma->chn_base = devm_ioremap_resource(dev, res);
810         if (IS_ERR(jzdma->chn_base))
811                 return PTR_ERR(jzdma->chn_base);
812
813         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
814         if (res) {
815                 jzdma->ctrl_base = devm_ioremap_resource(dev, res);
816                 if (IS_ERR(jzdma->ctrl_base))
817                         return PTR_ERR(jzdma->ctrl_base);
818         } else {
819                 /*
820                  * On JZ4780, if the second memory resource was not supplied,
821                  * assume we're using an old devicetree, and calculate the
822                  * offset to the control registers.
823                  */
824                 jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
825         }
826
827         ret = platform_get_irq(pdev, 0);
828         if (ret < 0) {
829                 dev_err(dev, "failed to get IRQ: %d\n", ret);
830                 return ret;
831         }
832
833         jzdma->irq = ret;
834
835         ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
836                           jzdma);
837         if (ret) {
838                 dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
839                 return ret;
840         }
841
842         jzdma->clk = devm_clk_get(dev, NULL);
843         if (IS_ERR(jzdma->clk)) {
844                 dev_err(dev, "failed to get clock\n");
845                 ret = PTR_ERR(jzdma->clk);
846                 goto err_free_irq;
847         }
848
849         clk_prepare_enable(jzdma->clk);
850
851         /* Property is optional, if it doesn't exist the value will remain 0. */
852         of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
853                                    0, &jzdma->chan_reserved);
854
855         dd = &jzdma->dma_device;
856
857         dma_cap_set(DMA_MEMCPY, dd->cap_mask);
858         dma_cap_set(DMA_SLAVE, dd->cap_mask);
859         dma_cap_set(DMA_CYCLIC, dd->cap_mask);
860
861         dd->dev = dev;
862         dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
863         dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
864         dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
865         dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
866         dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
867         dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
868         dd->device_config = jz4780_dma_config;
869         dd->device_terminate_all = jz4780_dma_terminate_all;
870         dd->device_synchronize = jz4780_dma_synchronize;
871         dd->device_tx_status = jz4780_dma_tx_status;
872         dd->device_issue_pending = jz4780_dma_issue_pending;
873         dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
874         dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
875         dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
876         dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
877
878         /*
879          * Enable DMA controller, mark all channels as not programmable.
880          * Also set the FMSC bit - it increases MSC performance, so it makes
881          * little sense not to enable it.
882          */
883         jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC,
884                           JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
885         jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);
886
887         INIT_LIST_HEAD(&dd->channels);
888
889         for (i = 0; i < soc_data->nb_channels; i++) {
890                 jzchan = &jzdma->chan[i];
891                 jzchan->id = i;
892
893                 vchan_init(&jzchan->vchan, dd);
894                 jzchan->vchan.desc_free = jz4780_dma_desc_free;
895         }
896
897         ret = dma_async_device_register(dd);
898         if (ret) {
899                 dev_err(dev, "failed to register device\n");
900                 goto err_disable_clk;
901         }
902
903         /* Register with OF DMA helpers. */
904         ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
905                                          jzdma);
906         if (ret) {
907                 dev_err(dev, "failed to register OF DMA controller\n");
908                 goto err_unregister_dev;
909         }
910
911         dev_info(dev, "JZ4780 DMA controller initialised\n");
912         return 0;
913
914 err_unregister_dev:
915         dma_async_device_unregister(dd);
916
917 err_disable_clk:
918         clk_disable_unprepare(jzdma->clk);
919
920 err_free_irq:
921         free_irq(jzdma->irq, jzdma);
922         return ret;
923 }
924
925 static int jz4780_dma_remove(struct platform_device *pdev)
926 {
927         struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
928         int i;
929
930         of_dma_controller_free(pdev->dev.of_node);
931
932         free_irq(jzdma->irq, jzdma);
933
934         for (i = 0; i < jzdma->soc_data->nb_channels; i++)
935                 tasklet_kill(&jzdma->chan[i].vchan.task);
936
937         dma_async_device_unregister(&jzdma->dma_device);
938         return 0;
939 }
940
941 static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
942         .nb_channels = 32,
943 };
944
945 static const struct of_device_id jz4780_dma_dt_match[] = {
946         { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
947         {},
948 };
949 MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
950
951 static struct platform_driver jz4780_dma_driver = {
952         .probe          = jz4780_dma_probe,
953         .remove         = jz4780_dma_remove,
954         .driver = {
955                 .name   = "jz4780-dma",
956                 .of_match_table = of_match_ptr(jz4780_dma_dt_match),
957         },
958 };
959
960 static int __init jz4780_dma_init(void)
961 {
962         return platform_driver_register(&jz4780_dma_driver);
963 }
964 subsys_initcall(jz4780_dma_init);
965
966 static void __exit jz4780_dma_exit(void)
967 {
968         platform_driver_unregister(&jz4780_dma_driver);
969 }
970 module_exit(jz4780_dma_exit);
971
972 MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
973 MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
974 MODULE_LICENSE("GPL");