drivers/dma/pxa_dma.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
4  */
5
6 #include <linux/err.h>
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/types.h>
10 #include <linux/interrupt.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/slab.h>
13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h>
15 #include <linux/device.h>
16 #include <linux/platform_data/mmp_dma.h>
17 #include <linux/dmapool.h>
18 #include <linux/of_device.h>
19 #include <linux/of_dma.h>
20 #include <linux/of.h>
21 #include <linux/wait.h>
22 #include <linux/dma/pxa-dma.h>
23
24 #include "dmaengine.h"
25 #include "virt-dma.h"
26
27 #define DCSR(n)         (0x0000 + ((n) << 2))
28 #define DALGN(n)        0x00a0
29 #define DINT            0x00f0
30 #define DDADR(n)        (0x0200 + ((n) << 4))
31 #define DSADR(n)        (0x0204 + ((n) << 4))
32 #define DTADR(n)        (0x0208 + ((n) << 4))
33 #define DCMD(n)         (0x020c + ((n) << 4))
34
35 #define PXA_DCSR_RUN            BIT(31) /* Run Bit (read / write) */
36 #define PXA_DCSR_NODESC         BIT(30) /* No-Descriptor Fetch (read / write) */
37 #define PXA_DCSR_STOPIRQEN      BIT(29) /* Stop Interrupt Enable (R/W) */
38 #define PXA_DCSR_REQPEND        BIT(8)  /* Request Pending (read-only) */
39 #define PXA_DCSR_STOPSTATE      BIT(3)  /* Stop State (read-only) */
40 #define PXA_DCSR_ENDINTR        BIT(2)  /* End Interrupt (read / write) */
41 #define PXA_DCSR_STARTINTR      BIT(1)  /* Start Interrupt (read / write) */
42 #define PXA_DCSR_BUSERR         BIT(0)  /* Bus Error Interrupt (read / write) */
43
44 #define PXA_DCSR_EORIRQEN       BIT(28) /* End of Receive IRQ Enable (R/W) */
45 #define PXA_DCSR_EORJMPEN       BIT(27) /* Jump to next descriptor on EOR */
46 #define PXA_DCSR_EORSTOPEN      BIT(26) /* STOP on an EOR */
47 #define PXA_DCSR_SETCMPST       BIT(25) /* Set Descriptor Compare Status */
48 #define PXA_DCSR_CLRCMPST       BIT(24) /* Clear Descriptor Compare Status */
49 #define PXA_DCSR_CMPST          BIT(10) /* The Descriptor Compare Status */
50 #define PXA_DCSR_EORINTR        BIT(9)  /* The end of Receive */
51
52 #define DRCMR_MAPVLD    BIT(7)  /* Map Valid (read / write) */
53 #define DRCMR_CHLNUM    0x1f    /* mask for Channel Number (read / write) */
54
55 #define DDADR_DESCADDR  0xfffffff0      /* Address of next descriptor (mask) */
56 #define DDADR_STOP      BIT(0)  /* Stop (read / write) */
57
58 #define PXA_DCMD_INCSRCADDR     BIT(31) /* Source Address Increment Setting. */
59 #define PXA_DCMD_INCTRGADDR     BIT(30) /* Target Address Increment Setting. */
60 #define PXA_DCMD_FLOWSRC        BIT(29) /* Flow Control by the source. */
61 #define PXA_DCMD_FLOWTRG        BIT(28) /* Flow Control by the target. */
62 #define PXA_DCMD_STARTIRQEN     BIT(22) /* Start Interrupt Enable */
63 #define PXA_DCMD_ENDIRQEN       BIT(21) /* End Interrupt Enable */
64 #define PXA_DCMD_ENDIAN         BIT(18) /* Device Endian-ness. */
65 #define PXA_DCMD_BURST8         (1 << 16)       /* 8 byte burst */
66 #define PXA_DCMD_BURST16        (2 << 16)       /* 16 byte burst */
67 #define PXA_DCMD_BURST32        (3 << 16)       /* 32 byte burst */
68 #define PXA_DCMD_WIDTH1         (1 << 14)       /* 1 byte width */
69 #define PXA_DCMD_WIDTH2         (2 << 14)       /* 2 byte width (HalfWord) */
70 #define PXA_DCMD_WIDTH4         (3 << 14)       /* 4 byte width (Word) */
71 #define PXA_DCMD_LENGTH         0x01fff         /* length mask (max = 8K - 1) */
72
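/*
 * Transfers are split into hw descriptors of at most PDMA_MAX_DESC_BYTES:
 * the DCMD length field (8 KiB - 1) rounded down to an 8-byte
 * (1 << PDMA_ALIGNMENT) boundary.
 */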
73 #define PDMA_ALIGNMENT          3
74 #define PDMA_MAX_DESC_BYTES     (PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))
75
76 struct pxad_desc_hw {
77         u32 ddadr;      /* Points to the next descriptor + flags */
78         u32 dsadr;      /* DSADR value for the current transfer */
79         u32 dtadr;      /* DTADR value for the current transfer */
80         u32 dcmd;       /* DCMD value for the current transfer */
81 } __aligned(16);
82
83 struct pxad_desc_sw {
84         struct virt_dma_desc    vd;             /* Virtual descriptor */
85         int                     nb_desc;        /* Number of hw. descriptors */
86         size_t                  len;            /* Number of bytes xfered */
87         dma_addr_t              first;          /* First descriptor's addr */
88
89         /* At least one descriptor has a src/dst address that is not a multiple of 8 */
90         bool                    misaligned;
91         bool                    cyclic;
92         struct dma_pool         *desc_pool;     /* Channel's used allocator */
93
94         struct pxad_desc_hw     *hw_desc[];     /* DMA coherent descriptors */
95 };
96
97 struct pxad_phy {
98         int                     idx;
99         void __iomem            *base;
100         struct pxad_chan        *vchan;
101 };
102
103 struct pxad_chan {
104         struct virt_dma_chan    vc;             /* Virtual channel */
105         u32                     drcmr;          /* Requestor of the channel */
106         enum pxad_chan_prio     prio;           /* Required priority of phy */
107         /*
108          * At least one desc_sw among the submitted or issued transfers on
109          * this channel has an address that is not a multiple of 8
110          * (addr % 8 != 0). This requires the DALGN bit to be set on the phy.
111          */
112         bool                    misaligned;
113         struct dma_slave_config cfg;            /* Runtime config */
114
115         /* protected by vc->lock */
116         struct pxad_phy         *phy;
117         struct dma_pool         *desc_pool;     /* Descriptors pool */
118         dma_cookie_t            bus_error;
119
120         wait_queue_head_t       wq_state;
121 };
122
123 struct pxad_device {
124         struct dma_device               slave;
125         int                             nr_chans;
126         int                             nr_requestors;
127         void __iomem                    *base;
128         struct pxad_phy                 *phys;
129         spinlock_t                      phy_lock;       /* Phy association */
130 #ifdef CONFIG_DEBUG_FS
131         struct dentry                   *dbgfs_root;
132         struct dentry                   *dbgfs_state;
133         struct dentry                   **dbgfs_chan;
134 #endif
135 };
136
137 #define tx_to_pxad_desc(tx)                                     \
138         container_of(tx, struct pxad_desc_sw, async_tx)
139 #define to_pxad_chan(dchan)                                     \
140         container_of(dchan, struct pxad_chan, vc.chan)
141 #define to_pxad_dev(dmadev)                                     \
142         container_of(dmadev, struct pxad_device, slave)
143 #define to_pxad_sw_desc(_vd)                            \
144         container_of((_vd), struct pxad_desc_sw, vd)
145
146 #define _phy_readl_relaxed(phy, _reg)                                   \
147         readl_relaxed((phy)->base + _reg((phy)->idx))
148 #define phy_readl_relaxed(phy, _reg)                                    \
149         ({                                                              \
150                 u32 _v;                                                 \
151                 _v = readl_relaxed((phy)->base + _reg((phy)->idx));     \
152                 dev_vdbg(&phy->vchan->vc.chan.dev->device,              \
153                          "%s(): readl(%s): 0x%08x\n", __func__, #_reg,  \
154                           _v);                                          \
155                 _v;                                                     \
156         })
157 #define phy_writel(phy, val, _reg)                                      \
158         do {                                                            \
159                 writel((val), (phy)->base + _reg((phy)->idx));          \
160                 dev_vdbg(&phy->vchan->vc.chan.dev->device,              \
161                          "%s(): writel(0x%08x, %s)\n",                  \
162                          __func__, (u32)(val), #_reg);                  \
163         } while (0)
164 #define phy_writel_relaxed(phy, val, _reg)                              \
165         do {                                                            \
166                 writel_relaxed((val), (phy)->base + _reg((phy)->idx));  \
167                 dev_vdbg(&phy->vchan->vc.chan.dev->device,              \
168                          "%s(): writel_relaxed(0x%08x, %s)\n",          \
169                          __func__, (u32)(val), #_reg);                  \
170         } while (0)
171
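/*
 * pxad_drcmr - offset of the DRCMR register for a given requestor line.
 * Requestor lines 0..63 are mapped at 0x100 + line * 4, higher lines at
 * 0x1000 + line * 4.
 */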
172 static unsigned int pxad_drcmr(unsigned int line)
173 {
174         if (line < 64)
175                 return 0x100 + line * 4;
176         return 0x1000 + line * 4;
177 }
178
179 static bool pxad_filter_fn(struct dma_chan *chan, void *param);
180
181 /*
182  * Debug fs
183  */
184 #ifdef CONFIG_DEBUG_FS
185 #include <linux/debugfs.h>
186 #include <linux/uaccess.h>
187 #include <linux/seq_file.h>
188
189 static int requester_chan_show(struct seq_file *s, void *p)
190 {
191         struct pxad_phy *phy = s->private;
192         int i;
193         u32 drcmr;
194
195         seq_printf(s, "DMA channel %d requester :\n", phy->idx);
196         for (i = 0; i < 70; i++) {
197                 drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
198                 if ((drcmr & DRCMR_CHLNUM) == phy->idx)
199                         seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
200                                    !!(drcmr & DRCMR_MAPVLD));
201         }
202         return 0;
203 }
204
205 static inline int dbg_burst_from_dcmd(u32 dcmd)
206 {
207         int burst = (dcmd >> 16) & 0x3;
208
209         return burst ? 4 << burst : 0;
210 }
211
212 static int is_phys_valid(unsigned long addr)
213 {
214         return pfn_valid(__phys_to_pfn(addr));
215 }
216
217 #define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
218 #define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")
219
220 static int descriptors_show(struct seq_file *s, void *p)
221 {
222         struct pxad_phy *phy = s->private;
223         int i, max_show = 20, burst, width;
224         u32 dcmd;
225         unsigned long phys_desc, ddadr;
226         struct pxad_desc_hw *desc;
227
228         phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);
229
230         seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
231         seq_printf(s, "[%03d] First descriptor unknown\n", 0);
232         for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
233                 desc = phys_to_virt(phys_desc);
234                 dcmd = desc->dcmd;
235                 burst = dbg_burst_from_dcmd(dcmd);
236                 width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
237
238                 seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
239                            i, phys_desc, desc);
240                 seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
241                 seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
242                 seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
243                 seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
244                            dcmd,
245                            PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
246                            PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
247                            PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
248                            PXA_DCMD_STR(ENDIAN), burst, width,
249                            dcmd & PXA_DCMD_LENGTH);
250                 phys_desc = desc->ddadr;
251         }
252         if (i == max_show)
253                 seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
254                            i, phys_desc);
255         else
256                 seq_printf(s, "[%03d] Desc at %08lx is %s\n",
257                            i, phys_desc, phys_desc == DDADR_STOP ?
258                            "DDADR_STOP" : "invalid");
259
260         return 0;
261 }
262
263 static int chan_state_show(struct seq_file *s, void *p)
264 {
265         struct pxad_phy *phy = s->private;
266         u32 dcsr, dcmd;
267         int burst, width;
268         static const char * const str_prio[] = {
269                 "high", "normal", "low", "invalid"
270         };
271
272         dcsr = _phy_readl_relaxed(phy, DCSR);
273         dcmd = _phy_readl_relaxed(phy, DCMD);
274         burst = dbg_burst_from_dcmd(dcmd);
275         width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
276
277         seq_printf(s, "DMA channel %d\n", phy->idx);
278         seq_printf(s, "\tPriority : %s\n",
279                           str_prio[(phy->idx & 0xf) / 4]);
280         seq_printf(s, "\tUnaligned transfer bit: %s\n",
281                           _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
282                           "yes" : "no");
283         seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
284                    dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
285                    PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
286                    PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
287                    PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
288                    PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
289                    PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
290                    PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
291                    PXA_DCSR_STR(BUSERR));
292
293         seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
294                    dcmd,
295                    PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
296                    PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
297                    PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
298                    PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
299         seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
300         seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
301         seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));
302
303         return 0;
304 }
305
306 static int state_show(struct seq_file *s, void *p)
307 {
308         struct pxad_device *pdev = s->private;
309
310         /* basic device status */
311         seq_puts(s, "DMA engine status\n");
312         seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);
313
314         return 0;
315 }
316
317 DEFINE_SHOW_ATTRIBUTE(state);
318 DEFINE_SHOW_ATTRIBUTE(chan_state);
319 DEFINE_SHOW_ATTRIBUTE(descriptors);
320 DEFINE_SHOW_ATTRIBUTE(requester_chan);
321
322 static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
323                                              int ch, struct dentry *chandir)
324 {
325         char chan_name[11];
326         struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
327         struct dentry *chan_reqs = NULL;
328         void *dt;
329
330         scnprintf(chan_name, sizeof(chan_name), "%d", ch);
331         chan = debugfs_create_dir(chan_name, chandir);
332         dt = (void *)&pdev->phys[ch];
333
334         if (chan)
335                 chan_state = debugfs_create_file("state", 0400, chan, dt,
336                                                  &chan_state_fops);
337         if (chan_state)
338                 chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
339                                                  &descriptors_fops);
340         if (chan_descr)
341                 chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
342                                                 &requester_chan_fops);
343         if (!chan_reqs)
344                 goto err_state;
345
346         return chan;
347
348 err_state:
349         debugfs_remove_recursive(chan);
350         return NULL;
351 }
352
353 static void pxad_init_debugfs(struct pxad_device *pdev)
354 {
355         int i;
356         struct dentry *chandir;
357
358         pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
359         if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
360                 goto err_root;
361
362         pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
363                                                 pdev, &state_fops);
364         if (!pdev->dbgfs_state)
365                 goto err_state;
366
367         pdev->dbgfs_chan =
368                 kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state),
369                               GFP_KERNEL);
370         if (!pdev->dbgfs_chan)
371                 goto err_alloc;
372
373         chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
374         if (!chandir)
375                 goto err_chandir;
376
377         for (i = 0; i < pdev->nr_chans; i++) {
378                 pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
379                 if (!pdev->dbgfs_chan[i])
380                         goto err_chans;
381         }
382
383         return;
384 err_chans:
385 err_chandir:
386         kfree(pdev->dbgfs_chan);
387 err_alloc:
388 err_state:
389         debugfs_remove_recursive(pdev->dbgfs_root);
390 err_root:
391         pr_err("pxad: debugfs is not available\n");
392 }
393
394 static void pxad_cleanup_debugfs(struct pxad_device *pdev)
395 {
396         debugfs_remove_recursive(pdev->dbgfs_root);
397 }
398 #else
399 static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
400 static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
401 #endif
402
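/*
 * lookup_phy - reserve a free physical channel for a virtual channel.
 * Physical channels are grouped by priority; the search starts at the
 * priority requested by the channel and falls back to higher-priority
 * groups if none of the requested priority is free.
 */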
403 static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
404 {
405         int prio, i;
406         struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
407         struct pxad_phy *phy, *found = NULL;
408         unsigned long flags;
409
410         /*
411          * dma channel priorities
412          * ch 0 - 3,  16 - 19  <--> (0)
413          * ch 4 - 7,  20 - 23  <--> (1)
414          * ch 8 - 11, 24 - 27  <--> (2)
415          * ch 12 - 15, 28 - 31  <--> (3)
416          */
417
418         spin_lock_irqsave(&pdev->phy_lock, flags);
419         for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
420                 for (i = 0; i < pdev->nr_chans; i++) {
421                         if (prio != (i & 0xf) >> 2)
422                                 continue;
423                         phy = &pdev->phys[i];
424                         if (!phy->vchan) {
425                                 phy->vchan = pchan;
426                                 found = phy;
427                                 goto out_unlock;
428                         }
429                 }
430         }
431
432 out_unlock:
433         spin_unlock_irqrestore(&pdev->phy_lock, flags);
434         dev_dbg(&pchan->vc.chan.dev->device,
435                 "%s(): phy=%p(%d)\n", __func__, found,
436                 found ? found->idx : -1);
437
438         return found;
439 }
440
441 static void pxad_free_phy(struct pxad_chan *chan)
442 {
443         struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
444         unsigned long flags;
445         u32 reg;
446
447         dev_dbg(&chan->vc.chan.dev->device,
448                 "%s(): freeing\n", __func__);
449         if (!chan->phy)
450                 return;
451
452         /* clear the channel mapping in DRCMR */
453         if (chan->drcmr <= pdev->nr_requestors) {
454                 reg = pxad_drcmr(chan->drcmr);
455                 writel_relaxed(0, chan->phy->base + reg);
456         }
457
458         spin_lock_irqsave(&pdev->phy_lock, flags);
459         chan->phy->vchan = NULL;
460         chan->phy = NULL;
461         spin_unlock_irqrestore(&pdev->phy_lock, flags);
462 }
463
464 static bool is_chan_running(struct pxad_chan *chan)
465 {
466         u32 dcsr;
467         struct pxad_phy *phy = chan->phy;
468
469         if (!phy)
470                 return false;
471         dcsr = phy_readl_relaxed(phy, DCSR);
472         return dcsr & PXA_DCSR_RUN;
473 }
474
475 static bool is_running_chan_misaligned(struct pxad_chan *chan)
476 {
477         u32 dalgn;
478
479         BUG_ON(!chan->phy);
480         dalgn = phy_readl_relaxed(chan->phy, DALGN);
481         return dalgn & (BIT(chan->phy->idx));
482 }
483
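/*
 * phy_enable - start a physical channel.
 * Maps the channel's requestor line to this phy in DRCMR (when flow
 * control is used), programs the per-channel alignment bit in DALGN,
 * then starts the channel by writing DCSR with the RUN bit and the stop
 * interrupt enabled.
 */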
484 static void phy_enable(struct pxad_phy *phy, bool misaligned)
485 {
486         struct pxad_device *pdev;
487         u32 reg, dalgn;
488
489         if (!phy->vchan)
490                 return;
491
492         dev_dbg(&phy->vchan->vc.chan.dev->device,
493                 "%s(); phy=%p(%d) misaligned=%d\n", __func__,
494                 phy, phy->idx, misaligned);
495
496         pdev = to_pxad_dev(phy->vchan->vc.chan.device);
497         if (phy->vchan->drcmr <= pdev->nr_requestors) {
498                 reg = pxad_drcmr(phy->vchan->drcmr);
499                 writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
500         }
501
502         dalgn = phy_readl_relaxed(phy, DALGN);
503         if (misaligned)
504                 dalgn |= BIT(phy->idx);
505         else
506                 dalgn &= ~BIT(phy->idx);
507         phy_writel_relaxed(phy, dalgn, DALGN);
508
509         phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
510                    PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
511 }
512
513 static void phy_disable(struct pxad_phy *phy)
514 {
515         u32 dcsr;
516
517         if (!phy)
518                 return;
519
520         dcsr = phy_readl_relaxed(phy, DCSR);
521         dev_dbg(&phy->vchan->vc.chan.dev->device,
522                 "%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
523         phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
524 }
525
526 static void pxad_launch_chan(struct pxad_chan *chan,
527                                  struct pxad_desc_sw *desc)
528 {
529         dev_dbg(&chan->vc.chan.dev->device,
530                 "%s(): desc=%p\n", __func__, desc);
531         if (!chan->phy) {
532                 chan->phy = lookup_phy(chan);
533                 if (!chan->phy) {
534                         dev_dbg(&chan->vc.chan.dev->device,
535                                 "%s(): no free dma channel\n", __func__);
536                         return;
537                 }
538         }
539         chan->bus_error = 0;
540
541         /*
542          * Program the descriptor's address into the DMA controller,
543          * then start the DMA transaction
544          */
545         phy_writel(chan->phy, desc->first, DDADR);
546         phy_enable(chan->phy, chan->misaligned);
547         wake_up(&chan->wq_state);
548 }
549
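/*
 * set_updater_desc - terminate a descriptor chain with an "updater".
 * The last hw descriptor performs a dummy 4-byte copy of its own ddadr
 * field over its dtadr field: once it has run, dtadr no longer equals
 * dsadr + 8, which is how is_desc_completed() detects completion. For
 * cyclic transfers the chain is looped back to the first descriptor
 * instead of running into the updater.
 */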
550 static void set_updater_desc(struct pxad_desc_sw *sw_desc,
551                              unsigned long flags)
552 {
553         struct pxad_desc_hw *updater =
554                 sw_desc->hw_desc[sw_desc->nb_desc - 1];
555         dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;
556
557         updater->ddadr = DDADR_STOP;
558         updater->dsadr = dma;
559         updater->dtadr = dma + 8;
560         updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
561                 (PXA_DCMD_LENGTH & sizeof(u32));
562         if (flags & DMA_PREP_INTERRUPT)
563                 updater->dcmd |= PXA_DCMD_ENDIRQEN;
564         if (sw_desc->cyclic)
565                 sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
566 }
567
568 static bool is_desc_completed(struct virt_dma_desc *vd)
569 {
570         struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
571         struct pxad_desc_hw *updater =
572                 sw_desc->hw_desc[sw_desc->nb_desc - 1];
573
574         return updater->dtadr != (updater->dsadr + 8);
575 }
576
577 static void pxad_desc_chain(struct virt_dma_desc *vd1,
578                                 struct virt_dma_desc *vd2)
579 {
580         struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
581         struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
582         dma_addr_t dma_to_chain;
583
584         dma_to_chain = desc2->first;
585         desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
586 }
587
588 static bool pxad_try_hotchain(struct virt_dma_chan *vc,
589                                   struct virt_dma_desc *vd)
590 {
591         struct virt_dma_desc *vd_last_issued = NULL;
592         struct pxad_chan *chan = to_pxad_chan(&vc->chan);
593
594         /*
595          * Attempt to hot chain the tx if the phy is still running. This is
596          * considered successful only if either the channel is still running
597          * after the chaining, or if the chained transfer is completed after
598          * having been hot chained.
599          * A change of alignment is not allowed, and forbids hotchaining.
600          */
601         if (is_chan_running(chan)) {
602                 BUG_ON(list_empty(&vc->desc_issued));
603
604                 if (!is_running_chan_misaligned(chan) &&
605                     to_pxad_sw_desc(vd)->misaligned)
606                         return false;
607
608                 vd_last_issued = list_entry(vc->desc_issued.prev,
609                                             struct virt_dma_desc, node);
610                 pxad_desc_chain(vd_last_issued, vd);
611                 if (is_chan_running(chan) || is_desc_completed(vd))
612                         return true;
613         }
614
615         return false;
616 }
617
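/*
 * clear_chan_irq - acknowledge a channel interrupt.
 * Returns PXA_DCSR_RUN when the interrupt was not raised by this channel
 * (its DINT bit is clear), so the caller can report IRQ_NONE; otherwise
 * the pending DCSR status is written back to clear it and returned with
 * the RUN bit masked out.
 */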
618 static unsigned int clear_chan_irq(struct pxad_phy *phy)
619 {
620         u32 dcsr;
621         u32 dint = readl(phy->base + DINT);
622
623         if (!(dint & BIT(phy->idx)))
624                 return PXA_DCSR_RUN;
625
626         /* clear irq */
627         dcsr = phy_readl_relaxed(phy, DCSR);
628         phy_writel(phy, dcsr, DCSR);
629         if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
630                 dev_warn(&phy->vchan->vc.chan.dev->device,
631                          "%s(chan=%p): PXA_DCSR_BUSERR\n",
632                          __func__, &phy->vchan);
633
634         return dcsr & ~PXA_DCSR_RUN;
635 }
636
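/*
 * pxad_chan_handler - per-channel interrupt handler.
 * Completes the issued descriptors whose updater has run (cyclic ones
 * get their period callback instead), latches a bus error cookie and
 * stops the phy on PXA_DCSR_BUSERR, and relaunches the channel on the
 * next issued descriptor when it stopped with work still pending.
 */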
637 static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
638 {
639         struct pxad_phy *phy = dev_id;
640         struct pxad_chan *chan = phy->vchan;
641         struct virt_dma_desc *vd, *tmp;
642         unsigned int dcsr;
643         unsigned long flags;
644         bool vd_completed;
645         dma_cookie_t last_started = 0;
646
647         BUG_ON(!chan);
648
649         dcsr = clear_chan_irq(phy);
650         if (dcsr & PXA_DCSR_RUN)
651                 return IRQ_NONE;
652
653         spin_lock_irqsave(&chan->vc.lock, flags);
654         list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
655                 vd_completed = is_desc_completed(vd);
656                 dev_dbg(&chan->vc.chan.dev->device,
657                         "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
658                         __func__, vd, vd->tx.cookie, vd_completed,
659                         dcsr);
660                 last_started = vd->tx.cookie;
661                 if (to_pxad_sw_desc(vd)->cyclic) {
662                         vchan_cyclic_callback(vd);
663                         break;
664                 }
665                 if (vd_completed) {
666                         list_del(&vd->node);
667                         vchan_cookie_complete(vd);
668                 } else {
669                         break;
670                 }
671         }
672
673         if (dcsr & PXA_DCSR_BUSERR) {
674                 chan->bus_error = last_started;
675                 phy_disable(phy);
676         }
677
678         if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
679                 dev_dbg(&chan->vc.chan.dev->device,
680                 "%s(): channel stopped, submitted_empty=%d issued_empty=%d",
681                         __func__,
682                         list_empty(&chan->vc.desc_submitted),
683                         list_empty(&chan->vc.desc_issued));
684                 phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);
685
686                 if (list_empty(&chan->vc.desc_issued)) {
687                         chan->misaligned =
688                                 !list_empty(&chan->vc.desc_submitted);
689                 } else {
690                         vd = list_first_entry(&chan->vc.desc_issued,
691                                               struct virt_dma_desc, node);
692                         pxad_launch_chan(chan, to_pxad_sw_desc(vd));
693                 }
694         }
695         spin_unlock_irqrestore(&chan->vc.lock, flags);
696         wake_up(&chan->wq_state);
697
698         return IRQ_HANDLED;
699 }
700
701 static irqreturn_t pxad_int_handler(int irq, void *dev_id)
702 {
703         struct pxad_device *pdev = dev_id;
704         struct pxad_phy *phy;
705         u32 dint = readl(pdev->base + DINT);
706         int i, ret = IRQ_NONE;
707
708         while (dint) {
709                 i = __ffs(dint);
710                 dint &= (dint - 1);
711                 phy = &pdev->phys[i];
712                 if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
713                         ret = IRQ_HANDLED;
714         }
715
716         return ret;
717 }
718
719 static int pxad_alloc_chan_resources(struct dma_chan *dchan)
720 {
721         struct pxad_chan *chan = to_pxad_chan(dchan);
722         struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
723
724         if (chan->desc_pool)
725                 return 1;
726
727         chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
728                                           pdev->slave.dev,
729                                           sizeof(struct pxad_desc_hw),
730                                           __alignof__(struct pxad_desc_hw),
731                                           0);
732         if (!chan->desc_pool) {
733                 dev_err(&chan->vc.chan.dev->device,
734                         "%s(): unable to allocate descriptor pool\n",
735                         __func__);
736                 return -ENOMEM;
737         }
738
739         return 1;
740 }
741
742 static void pxad_free_chan_resources(struct dma_chan *dchan)
743 {
744         struct pxad_chan *chan = to_pxad_chan(dchan);
745
746         vchan_free_chan_resources(&chan->vc);
747         dma_pool_destroy(chan->desc_pool);
748         chan->desc_pool = NULL;
749
750         chan->drcmr = U32_MAX;
751         chan->prio = PXAD_PRIO_LOWEST;
752 }
753
754 static void pxad_free_desc(struct virt_dma_desc *vd)
755 {
756         int i;
757         dma_addr_t dma;
758         struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
759
760         BUG_ON(sw_desc->nb_desc == 0);
761         for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
762                 if (i > 0)
763                         dma = sw_desc->hw_desc[i - 1]->ddadr;
764                 else
765                         dma = sw_desc->first;
766                 dma_pool_free(sw_desc->desc_pool,
767                               sw_desc->hw_desc[i], dma);
768         }
769         sw_desc->nb_desc = 0;
770         kfree(sw_desc);
771 }
772
773 static struct pxad_desc_sw *
774 pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
775 {
776         struct pxad_desc_sw *sw_desc;
777         dma_addr_t dma;
778         int i;
779
780         sw_desc = kzalloc(sizeof(*sw_desc) +
781                           nb_hw_desc * sizeof(struct pxad_desc_hw *),
782                           GFP_NOWAIT);
783         if (!sw_desc)
784                 return NULL;
785         sw_desc->desc_pool = chan->desc_pool;
786
787         for (i = 0; i < nb_hw_desc; i++) {
788                 sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
789                                                      GFP_NOWAIT, &dma);
790                 if (!sw_desc->hw_desc[i]) {
791                         dev_err(&chan->vc.chan.dev->device,
792                                 "%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
793                                 __func__, i, sw_desc->desc_pool);
794                         goto err;
795                 }
796
797                 if (i == 0)
798                         sw_desc->first = dma;
799                 else
800                         sw_desc->hw_desc[i - 1]->ddadr = dma;
801                 sw_desc->nb_desc++;
802         }
803
804         return sw_desc;
805 err:
806         pxad_free_desc(&sw_desc->vd);
807         return NULL;
808 }
809
810 static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
811 {
812         struct virt_dma_chan *vc = to_virt_chan(tx->chan);
813         struct pxad_chan *chan = to_pxad_chan(&vc->chan);
814         struct virt_dma_desc *vd_chained = NULL,
815                 *vd = container_of(tx, struct virt_dma_desc, tx);
816         dma_cookie_t cookie;
817         unsigned long flags;
818
819         set_updater_desc(to_pxad_sw_desc(vd), tx->flags);
820
821         spin_lock_irqsave(&vc->lock, flags);
822         cookie = dma_cookie_assign(tx);
823
824         if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
825                 list_move_tail(&vd->node, &vc->desc_issued);
826                 dev_dbg(&chan->vc.chan.dev->device,
827                         "%s(): txd %p[%x]: submitted (hot linked)\n",
828                         __func__, vd, cookie);
829                 goto out;
830         }
831
832         /*
833          * Fallback to placing the tx in the submitted queue
834          */
835         if (!list_empty(&vc->desc_submitted)) {
836                 vd_chained = list_entry(vc->desc_submitted.prev,
837                                         struct virt_dma_desc, node);
838                 /*
839                  * Only chain the descriptors if no new misalignment is
840                  * introduced. If a new misalignment is chained, let the channel
841                  * stop, and be relaunched in misalign mode from the irq
842                  * handler.
843                  */
844                 if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
845                         pxad_desc_chain(vd_chained, vd);
846                 else
847                         vd_chained = NULL;
848         }
849         dev_dbg(&chan->vc.chan.dev->device,
850                 "%s(): txd %p[%x]: submitted (%s linked)\n",
851                 __func__, vd, cookie, vd_chained ? "cold" : "not");
852         list_move_tail(&vd->node, &vc->desc_submitted);
853         chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;
854
855 out:
856         spin_unlock_irqrestore(&vc->lock, flags);
857         return cookie;
858 }
859
860 static void pxad_issue_pending(struct dma_chan *dchan)
861 {
862         struct pxad_chan *chan = to_pxad_chan(dchan);
863         struct virt_dma_desc *vd_first;
864         unsigned long flags;
865
866         spin_lock_irqsave(&chan->vc.lock, flags);
867         if (list_empty(&chan->vc.desc_submitted))
868                 goto out;
869
870         vd_first = list_first_entry(&chan->vc.desc_submitted,
871                                     struct virt_dma_desc, node);
872         dev_dbg(&chan->vc.chan.dev->device,
873                 "%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);
874
875         vchan_issue_pending(&chan->vc);
876         if (!pxad_try_hotchain(&chan->vc, vd_first))
877                 pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
878 out:
879         spin_unlock_irqrestore(&chan->vc.lock, flags);
880 }
881
882 static inline struct dma_async_tx_descriptor *
883 pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
884                  unsigned long tx_flags)
885 {
886         struct dma_async_tx_descriptor *tx;
887         struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
888
889         INIT_LIST_HEAD(&vd->node);
890         tx = vchan_tx_prep(vc, vd, tx_flags);
891         tx->tx_submit = pxad_tx_submit;
892         dev_dbg(&chan->vc.chan.dev->device,
893                 "%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
894                 vc, vd, vd->tx.cookie,
895                 tx_flags);
896
897         return tx;
898 }
899
900 static void pxad_get_config(struct pxad_chan *chan,
901                             enum dma_transfer_direction dir,
902                             u32 *dcmd, u32 *dev_src, u32 *dev_dst)
903 {
904         u32 maxburst = 0, dev_addr = 0;
905         enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
906         struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
907
908         *dcmd = 0;
909         if (dir == DMA_DEV_TO_MEM) {
910                 maxburst = chan->cfg.src_maxburst;
911                 width = chan->cfg.src_addr_width;
912                 dev_addr = chan->cfg.src_addr;
913                 *dev_src = dev_addr;
914                 *dcmd |= PXA_DCMD_INCTRGADDR;
915                 if (chan->drcmr <= pdev->nr_requestors)
916                         *dcmd |= PXA_DCMD_FLOWSRC;
917         }
918         if (dir == DMA_MEM_TO_DEV) {
919                 maxburst = chan->cfg.dst_maxburst;
920                 width = chan->cfg.dst_addr_width;
921                 dev_addr = chan->cfg.dst_addr;
922                 *dev_dst = dev_addr;
923                 *dcmd |= PXA_DCMD_INCSRCADDR;
924                 if (chan->drcmr <= pdev->nr_requestors)
925                         *dcmd |= PXA_DCMD_FLOWTRG;
926         }
927         if (dir == DMA_MEM_TO_MEM)
928                 *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
929                         PXA_DCMD_INCSRCADDR;
930
931         dev_dbg(&chan->vc.chan.dev->device,
932                 "%s(): dev_addr=0x%x maxburst=%d width=%d  dir=%d\n",
933                 __func__, dev_addr, maxburst, width, dir);
934
935         if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
936                 *dcmd |= PXA_DCMD_WIDTH1;
937         else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
938                 *dcmd |= PXA_DCMD_WIDTH2;
939         else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
940                 *dcmd |= PXA_DCMD_WIDTH4;
941
942         if (maxburst == 8)
943                 *dcmd |= PXA_DCMD_BURST8;
944         else if (maxburst == 16)
945                 *dcmd |= PXA_DCMD_BURST16;
946         else if (maxburst == 32)
947                 *dcmd |= PXA_DCMD_BURST32;
948
949         /* FIXME: drivers should be ported over to use the filter
950          * function. Once that's done, the following two lines can
951          * be removed.
952          */
953         if (chan->cfg.slave_id)
954                 chan->drcmr = chan->cfg.slave_id;
955 }
956
957 static struct dma_async_tx_descriptor *
958 pxad_prep_memcpy(struct dma_chan *dchan,
959                  dma_addr_t dma_dst, dma_addr_t dma_src,
960                  size_t len, unsigned long flags)
961 {
962         struct pxad_chan *chan = to_pxad_chan(dchan);
963         struct pxad_desc_sw *sw_desc;
964         struct pxad_desc_hw *hw_desc;
965         u32 dcmd;
966         unsigned int i, nb_desc = 0;
967         size_t copy;
968
969         if (!dchan || !len)
970                 return NULL;
971
972         dev_dbg(&chan->vc.chan.dev->device,
973                 "%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
974                 __func__, (unsigned long)dma_dst, (unsigned long)dma_src,
975                 len, flags);
976         pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);
977
978         nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
979         sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
980         if (!sw_desc)
981                 return NULL;
982         sw_desc->len = len;
983
984         if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
985             !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
986                 sw_desc->misaligned = true;
987
988         i = 0;
989         do {
990                 hw_desc = sw_desc->hw_desc[i++];
991                 copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
992                 hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
993                 hw_desc->dsadr = dma_src;
994                 hw_desc->dtadr = dma_dst;
995                 len -= copy;
996                 dma_src += copy;
997                 dma_dst += copy;
998         } while (len);
999         set_updater_desc(sw_desc, flags);
1000
1001         return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
1002 }
1003
1004 static struct dma_async_tx_descriptor *
1005 pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
1006                    unsigned int sg_len, enum dma_transfer_direction dir,
1007                    unsigned long flags, void *context)
1008 {
1009         struct pxad_chan *chan = to_pxad_chan(dchan);
1010         struct pxad_desc_sw *sw_desc;
1011         size_t len, avail;
1012         struct scatterlist *sg;
1013         dma_addr_t dma;
1014         u32 dcmd, dsadr = 0, dtadr = 0;
1015         unsigned int nb_desc = 0, i, j = 0;
1016
1017         if ((sgl == NULL) || (sg_len == 0))
1018                 return NULL;
1019
1020         pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
1021         dev_dbg(&chan->vc.chan.dev->device,
1022                 "%s(): dir=%d flags=%lx\n", __func__, dir, flags);
1023
1024         for_each_sg(sgl, sg, sg_len, i)
1025                 nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
1026         sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
1027         if (!sw_desc)
1028                 return NULL;
1029
1030         for_each_sg(sgl, sg, sg_len, i) {
1031                 dma = sg_dma_address(sg);
1032                 avail = sg_dma_len(sg);
1033                 sw_desc->len += avail;
1034
1035                 do {
1036                         len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
1037                         if (dma & 0x7)
1038                                 sw_desc->misaligned = true;
1039
1040                         sw_desc->hw_desc[j]->dcmd =
1041                                 dcmd | (PXA_DCMD_LENGTH & len);
1042                         sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
1043                         sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;
1044
1045                         dma += len;
1046                         avail -= len;
1047                 } while (avail);
1048         }
1049         set_updater_desc(sw_desc, flags);
1050
1051         return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
1052 }
1053
1054 static struct dma_async_tx_descriptor *
1055 pxad_prep_dma_cyclic(struct dma_chan *dchan,
1056                      dma_addr_t buf_addr, size_t len, size_t period_len,
1057                      enum dma_transfer_direction dir, unsigned long flags)
1058 {
1059         struct pxad_chan *chan = to_pxad_chan(dchan);
1060         struct pxad_desc_sw *sw_desc;
1061         struct pxad_desc_hw **phw_desc;
1062         dma_addr_t dma;
1063         u32 dcmd, dsadr = 0, dtadr = 0;
1064         unsigned int nb_desc = 0;
1065
1066         if (!dchan || !len || !period_len)
1067                 return NULL;
1068         if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
1069                 dev_err(&chan->vc.chan.dev->device,
1070                         "Unsupported direction for cyclic DMA\n");
1071                 return NULL;
1072         }
1073         /* the buffer length must be a multiple of period_len */
1074         if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
1075             !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
1076                 return NULL;
1077
1078         pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
1079         dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
1080         dev_dbg(&chan->vc.chan.dev->device,
1081                 "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
1082                 __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
1083
1084         nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
1085         nb_desc *= DIV_ROUND_UP(len, period_len);
1086         sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
1087         if (!sw_desc)
1088                 return NULL;
1089         sw_desc->cyclic = true;
1090         sw_desc->len = len;
1091
1092         phw_desc = sw_desc->hw_desc;
1093         dma = buf_addr;
1094         do {
1095                 phw_desc[0]->dsadr = dsadr ? dsadr : dma;
1096                 phw_desc[0]->dtadr = dtadr ? dtadr : dma;
1097                 phw_desc[0]->dcmd = dcmd;
1098                 phw_desc++;
1099                 dma += period_len;
1100                 len -= period_len;
1101         } while (len);
1102         set_updater_desc(sw_desc, flags);
1103
1104         return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
1105 }
1106
1107 static int pxad_config(struct dma_chan *dchan,
1108                        struct dma_slave_config *cfg)
1109 {
1110         struct pxad_chan *chan = to_pxad_chan(dchan);
1111
1112         if (!dchan)
1113                 return -EINVAL;
1114
1115         chan->cfg = *cfg;
1116         return 0;
1117 }
1118
1119 static int pxad_terminate_all(struct dma_chan *dchan)
1120 {
1121         struct pxad_chan *chan = to_pxad_chan(dchan);
1122         struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
1123         struct virt_dma_desc *vd = NULL;
1124         unsigned long flags;
1125         struct pxad_phy *phy;
1126         LIST_HEAD(head);
1127
1128         dev_dbg(&chan->vc.chan.dev->device,
1129                 "%s(): vchan %p: terminate all\n", __func__, &chan->vc);
1130
1131         spin_lock_irqsave(&chan->vc.lock, flags);
1132         vchan_get_all_descriptors(&chan->vc, &head);
1133
1134         list_for_each_entry(vd, &head, node) {
1135                 dev_dbg(&chan->vc.chan.dev->device,
1136                         "%s(): cancelling txd %p[%x] (completed=%d)", __func__,
1137                         vd, vd->tx.cookie, is_desc_completed(vd));
1138         }
1139
1140         phy = chan->phy;
1141         if (phy) {
1142                 phy_disable(chan->phy);
1143                 pxad_free_phy(chan);
1144                 chan->phy = NULL;
1145                 spin_lock(&pdev->phy_lock);
1146                 phy->vchan = NULL;
1147                 spin_unlock(&pdev->phy_lock);
1148         }
1149         spin_unlock_irqrestore(&chan->vc.lock, flags);
1150         vchan_dma_desc_free_list(&chan->vc, &head);
1151
1152         return 0;
1153 }
1154
1155 static unsigned int pxad_residue(struct pxad_chan *chan,
1156                                  dma_cookie_t cookie)
1157 {
1158         struct virt_dma_desc *vd = NULL;
1159         struct pxad_desc_sw *sw_desc = NULL;
1160         struct pxad_desc_hw *hw_desc = NULL;
1161         u32 curr, start, len, end, residue = 0;
1162         unsigned long flags;
1163         bool passed = false;
1164         int i;
1165
1166         /*
1167          * If the channel does not have a phy pointer anymore, it has already
1168          * been completed. Therefore, its residue is 0.
1169          */
1170         if (!chan->phy)
1171                 return 0;
1172
1173         spin_lock_irqsave(&chan->vc.lock, flags);
1174
1175         vd = vchan_find_desc(&chan->vc, cookie);
1176         if (!vd)
1177                 goto out;
1178
1179         sw_desc = to_pxad_sw_desc(vd);
1180         if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
1181                 curr = phy_readl_relaxed(chan->phy, DSADR);
1182         else
1183                 curr = phy_readl_relaxed(chan->phy, DTADR);
1184
1185         /*
1186          * curr has to be read before checking descriptor completion, so
1187          * that a curr value inside the status updater descriptor implies
1188          * that the following test returns true; the barrier prevents
1189          * reordering of the curr load and the test.
1190          */
1191         rmb();
1192         if (is_desc_completed(vd))
1193                 goto out;
1194
1195         for (i = 0; i < sw_desc->nb_desc - 1; i++) {
1196                 hw_desc = sw_desc->hw_desc[i];
1197                 if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
1198                         start = hw_desc->dsadr;
1199                 else
1200                         start = hw_desc->dtadr;
1201                 len = hw_desc->dcmd & PXA_DCMD_LENGTH;
1202                 end = start + len;
1203
1204                 /*
1205                  * 'passed' will be latched once we find the descriptor whose
1206                  * [start, end] range contains the curr pointer. All
1207                  * descriptors that occur in the list _after_ that partially
1208                  * handled descriptor
1209                  * are still to be processed and are hence added to the
1210                  * residual bytes counter.
1211                  */
1212
1213                 if (passed) {
1214                         residue += len;
1215                 } else if (curr >= start && curr <= end) {
1216                         residue += end - curr;
1217                         passed = true;
1218                 }
1219         }
1220         if (!passed)
1221                 residue = sw_desc->len;
1222
1223 out:
1224         spin_unlock_irqrestore(&chan->vc.lock, flags);
1225         dev_dbg(&chan->vc.chan.dev->device,
1226                 "%s(): txd %p[%x] sw_desc=%p: %d\n",
1227                 __func__, vd, cookie, sw_desc, residue);
1228         return residue;
1229 }
1230
1231 static enum dma_status pxad_tx_status(struct dma_chan *dchan,
1232                                       dma_cookie_t cookie,
1233                                       struct dma_tx_state *txstate)
1234 {
1235         struct pxad_chan *chan = to_pxad_chan(dchan);
1236         enum dma_status ret;
1237
1238         if (cookie == chan->bus_error)
1239                 return DMA_ERROR;
1240
1241         ret = dma_cookie_status(dchan, cookie, txstate);
1242         if (likely(txstate && (ret != DMA_ERROR)))
1243                 dma_set_residue(txstate, pxad_residue(chan, cookie));
1244
1245         return ret;
1246 }
1247
1248 static void pxad_synchronize(struct dma_chan *dchan)
1249 {
1250         struct pxad_chan *chan = to_pxad_chan(dchan);
1251
1252         wait_event(chan->wq_state, !is_chan_running(chan));
1253         vchan_synchronize(&chan->vc);
1254 }
1255
1256 static void pxad_free_channels(struct dma_device *dmadev)
1257 {
1258         struct pxad_chan *c, *cn;
1259
1260         list_for_each_entry_safe(c, cn, &dmadev->channels,
1261                                  vc.chan.device_node) {
1262                 list_del(&c->vc.chan.device_node);
1263                 tasklet_kill(&c->vc.task);
1264         }
1265 }
1266
1267 static int pxad_remove(struct platform_device *op)
1268 {
1269         struct pxad_device *pdev = platform_get_drvdata(op);
1270
1271         pxad_cleanup_debugfs(pdev);
1272         pxad_free_channels(&pdev->slave);
1273         return 0;
1274 }
1275
1276 static int pxad_init_phys(struct platform_device *op,
1277                           struct pxad_device *pdev,
1278                           unsigned int nb_phy_chans)
1279 {
1280         int irq0, irq, nr_irq = 0, i, ret;
1281         struct pxad_phy *phy;
1282
1283         irq0 = platform_get_irq(op, 0);
1284         if (irq0 < 0)
1285                 return irq0;
1286
1287         pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
1288                                   sizeof(pdev->phys[0]), GFP_KERNEL);
1289         if (!pdev->phys)
1290                 return -ENOMEM;
1291
1292         for (i = 0; i < nb_phy_chans; i++)
1293                 if (platform_get_irq(op, i) > 0)
1294                         nr_irq++;
1295
1296         for (i = 0; i < nb_phy_chans; i++) {
1297                 phy = &pdev->phys[i];
1298                 phy->base = pdev->base;
1299                 phy->idx = i;
1300                 irq = platform_get_irq(op, i);
1301                 if ((nr_irq > 1) && (irq > 0))
1302                         ret = devm_request_irq(&op->dev, irq,
1303                                                pxad_chan_handler,
1304                                                IRQF_SHARED, "pxa-dma", phy);
1305                 if ((nr_irq == 1) && (i == 0))
1306                         ret = devm_request_irq(&op->dev, irq0,
1307                                                pxad_int_handler,
1308                                                IRQF_SHARED, "pxa-dma", pdev);
1309                 if (ret) {
1310                         dev_err(pdev->slave.dev,
1311                                 "%s(): can't request irq %d:%d\n", __func__,
1312                                 irq, ret);
1313                         return ret;
1314                 }
1315         }
1316
1317         return 0;
1318 }
1319
1320 static const struct of_device_id pxad_dt_ids[] = {
1321         { .compatible = "marvell,pdma-1.0", },
1322         {}
1323 };
1324 MODULE_DEVICE_TABLE(of, pxad_dt_ids);
1325
1326 static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
1327                                            struct of_dma *ofdma)
1328 {
1329         struct pxad_device *d = ofdma->of_dma_data;
1330         struct dma_chan *chan;
1331
1332         chan = dma_get_any_slave_channel(&d->slave);
1333         if (!chan)
1334                 return NULL;
1335
1336         to_pxad_chan(chan)->drcmr = dma_spec->args[0];
1337         to_pxad_chan(chan)->prio = dma_spec->args[1];
1338
1339         return chan;
1340 }
1341
1342 static int pxad_init_dmadev(struct platform_device *op,
1343                             struct pxad_device *pdev,
1344                             unsigned int nr_phy_chans,
1345                             unsigned int nr_requestors)
1346 {
1347         int ret;
1348         unsigned int i;
1349         struct pxad_chan *c;
1350
1351         pdev->nr_chans = nr_phy_chans;
1352         pdev->nr_requestors = nr_requestors;
1353         INIT_LIST_HEAD(&pdev->slave.channels);
1354         pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
1355         pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
1356         pdev->slave.device_tx_status = pxad_tx_status;
1357         pdev->slave.device_issue_pending = pxad_issue_pending;
1358         pdev->slave.device_config = pxad_config;
1359         pdev->slave.device_synchronize = pxad_synchronize;
1360         pdev->slave.device_terminate_all = pxad_terminate_all;
1361
1362         if (op->dev.coherent_dma_mask)
1363                 dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
1364         else
1365                 dma_set_mask(&op->dev, DMA_BIT_MASK(32));
1366
1367         ret = pxad_init_phys(op, pdev, nr_phy_chans);
1368         if (ret)
1369                 return ret;
1370
1371         for (i = 0; i < nr_phy_chans; i++) {
1372                 c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
1373                 if (!c)
1374                         return -ENOMEM;
1375
1376                 c->drcmr = U32_MAX;
1377                 c->prio = PXAD_PRIO_LOWEST;
1378                 c->vc.desc_free = pxad_free_desc;
1379                 vchan_init(&c->vc, &pdev->slave);
1380                 init_waitqueue_head(&c->wq_state);
1381         }
1382
1383         return dmaenginem_async_device_register(&pdev->slave);
1384 }
1385
1386 static int pxad_probe(struct platform_device *op)
1387 {
1388         struct pxad_device *pdev;
1389         const struct of_device_id *of_id;
1390         const struct dma_slave_map *slave_map = NULL;
1391         struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
1392         struct resource *iores;
1393         int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
1394         const enum dma_slave_buswidth widths =
1395                 DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
1396                 DMA_SLAVE_BUSWIDTH_4_BYTES;
1397
1398         pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
1399         if (!pdev)
1400                 return -ENOMEM;
1401
1402         spin_lock_init(&pdev->phy_lock);
1403
1404         iores = platform_get_resource(op, IORESOURCE_MEM, 0);
1405         pdev->base = devm_ioremap_resource(&op->dev, iores);
1406         if (IS_ERR(pdev->base))
1407                 return PTR_ERR(pdev->base);
1408
1409         of_id = of_match_device(pxad_dt_ids, &op->dev);
1410         if (of_id) {
1411                 of_property_read_u32(op->dev.of_node, "#dma-channels",
1412                                      &dma_channels);
1413                 ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
1414                                            &nb_requestors);
1415                 if (ret) {
1416                         dev_warn(pdev->slave.dev,
1417                                  "#dma-requests set to default 32 as missing in OF: %d",
1418                                  ret);
1419                         nb_requestors = 32;
1420                 }
1421         } else if (pdata && pdata->dma_channels) {
1422                 dma_channels = pdata->dma_channels;
1423                 nb_requestors = pdata->nb_requestors;
1424                 slave_map = pdata->slave_map;
1425                 slave_map_cnt = pdata->slave_map_cnt;
1426         } else {
1427                 dma_channels = 32;      /* default: 32 channels */
1428         }
1429
1430         dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
1431         dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
1432         dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
1433         dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
1434         pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
1435         pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
1436         pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
1437         pdev->slave.filter.map = slave_map;
1438         pdev->slave.filter.mapcnt = slave_map_cnt;
1439         pdev->slave.filter.fn = pxad_filter_fn;
1440
1441         pdev->slave.copy_align = PDMA_ALIGNMENT;
1442         pdev->slave.src_addr_widths = widths;
1443         pdev->slave.dst_addr_widths = widths;
1444         pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1445         pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1446         pdev->slave.descriptor_reuse = true;
1447
1448         pdev->slave.dev = &op->dev;
1449         ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
1450         if (ret) {
1451                 dev_err(pdev->slave.dev, "unable to register\n");
1452                 return ret;
1453         }
1454
1455         if (op->dev.of_node) {
1456                 /* Device-tree DMA controller registration */
1457                 ret = of_dma_controller_register(op->dev.of_node,
1458                                                  pxad_dma_xlate, pdev);
1459                 if (ret < 0) {
1460                         dev_err(pdev->slave.dev,
1461                                 "of_dma_controller_register failed\n");
1462                         return ret;
1463                 }
1464         }
1465
1466         platform_set_drvdata(op, pdev);
1467         pxad_init_debugfs(pdev);
1468         dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
1469                  dma_channels, nb_requestors);
1470         return 0;
1471 }
1472
1473 static const struct platform_device_id pxad_id_table[] = {
1474         { "pxa-dma", },
1475         { },
1476 };
1477
1478 static struct platform_driver pxad_driver = {
1479         .driver         = {
1480                 .name   = "pxa-dma",
1481                 .of_match_table = pxad_dt_ids,
1482         },
1483         .id_table       = pxad_id_table,
1484         .probe          = pxad_probe,
1485         .remove         = pxad_remove,
1486 };
1487
1488 static bool pxad_filter_fn(struct dma_chan *chan, void *param)
1489 {
1490         struct pxad_chan *c = to_pxad_chan(chan);
1491         struct pxad_param *p = param;
1492
1493         if (chan->device->dev->driver != &pxad_driver.driver)
1494                 return false;
1495
1496         c->drcmr = p->drcmr;
1497         c->prio = p->prio;
1498
1499         return true;
1500 }
1501
1502 module_platform_driver(pxad_driver);
1503
1504 MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
1505 MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
1506 MODULE_LICENSE("GPL v2");