/*
 * Driver for the TXx9 SoC DMA Controller
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

#include "dmaengine.h"
#include "txx9dmac.h"

static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct txx9dmac_chan, chan);
}

static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
	const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

#define channel64_readq(dc, name) \
	__raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
	__raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
	__raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
	__raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs32(dc)->name))

#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
	(is_dmac64(dc) ? \
	channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
	(is_dmac64(dc) ? \
	channel64_writel(dc, name, val) : channel32_writel(dc, name, val))

static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		return channel64_readq(dc, CHAR);
	else
		return channel64_readl(dc, CHAR);
}

static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		channel64_writeq(dc, CHAR, val);
	else
		channel64_writel(dc, CHAR, val);
}

static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
	channel64_writel(dc, CHAR, 0);
	channel64_writel(dc, __pad_CHAR, 0);
#else
	channel64_writeq(dc, CHAR, 0);
#endif
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		return channel64_read_CHAR(dc);
	else
		return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (is_dmac64(dc))
		channel64_write_CHAR(dc, val);
	else
		channel32_writel(dc, CHAR, val);
}

static struct txx9dmac_regs __iomem *__txx9dmac_regs(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

#define dma64_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
	(__is_dmac64(ddev) ? \
	dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
	(__is_dmac64(ddev) ? \
	dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct txx9dmac_desc, txd);
}

static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
				 const struct txx9dmac_desc *desc)
{
	return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}

static void desc_write_CHAR(const struct txx9dmac_chan *dc,
			    struct txx9dmac_desc *desc, dma_addr_t val)
{
	if (is_dmac64(dc))
		desc->hwdesc.CHAR = val;
	else
		desc->hwdesc32.CHAR = val;
}

#define TXX9_DMA_MAX_COUNT	0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT	64

static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.next,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.prev,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
	if (!list_empty(&desc->tx_list))
		desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
	return desc;
}

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);

static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
						 gfp_t flags)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc)
		return NULL;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
	desc->txd.tx_submit = txx9dmac_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
					ddev->descsize, DMA_TO_DEVICE);
	return desc;
}

static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dc->lock);
	list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dc->lock);

	dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
		 i);
	if (!ret) {
		ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&dc->lock);
			dc->descs_allocated++;
			spin_unlock_bh(&dc->lock);
		} else
			dev_err(chan2dev(&dc->chan),
				"not enough descriptors available\n");
	}
	return ret;
}

static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
				       struct txx9dmac_desc *desc)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dc->chan),
				child->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dc->chan),
			desc->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
			      struct txx9dmac_desc *desc)
{
	if (desc) {
		struct txx9dmac_desc *child;

		txx9dmac_sync_desc_for_cpu(dc, desc);

		spin_lock_bh(&dc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dc->chan),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &dc->free_list);
		dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
			 desc);
		list_add(&desc->desc_node, &dc->free_list);
		spin_unlock_bh(&dc->lock);
	}
}

/*----------------------------------------------------------------------*/

static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			(u64)channel64_read_CHAR(dc),
			channel64_readq(dc, SAR),
			channel64_readq(dc, DAR),
			channel64_readl(dc, CNTR),
			channel64_readl(dc, SAIR),
			channel64_readl(dc, DAIR),
			channel64_readl(dc, CCR),
			channel64_readl(dc, CSR));
	else
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			channel32_readl(dc, CHAR),
			channel32_readl(dc, SAR),
			channel32_readl(dc, DAR),
			channel32_readl(dc, CNTR),
			channel32_readl(dc, SAIR),
			channel32_readl(dc, DAIR),
			channel32_readl(dc, CCR),
			channel32_readl(dc, CSR));
}

static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
	channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
	if (is_dmac64(dc)) {
		channel64_clear_CHAR(dc);
		channel_writeq(dc, SAR, 0);
		channel_writeq(dc, DAR, 0);
	} else {
		channel_writel(dc, CHAR, 0);
		channel_writel(dc, SAR, 0);
		channel_writel(dc, DAR, 0);
	}
	channel_writel(dc, CNTR, 0);
	channel_writel(dc, SAIR, 0);
	channel_writel(dc, DAIR, 0);
	channel_writel(dc, CCR, 0);
}

/* Called with dc->lock held and bh disabled */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *first)
{
	struct txx9dmac_slave *ds = dc->chan.private;
	u32 sai, dai;

	dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
		 first->txd.cookie, first);
	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_err(chan2dev(&dc->chan),
			"BUG: Attempted to start non-idle channel\n");
		txx9dmac_dump_regs(dc);
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (is_dmac64(dc)) {
		channel64_writel(dc, CNTR, 0);
		channel64_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 8;
			dai = 8;
		}
		channel64_writel(dc, SAIR, sai);
		channel64_writel(dc, DAIR, dai);
		/* All 64-bit DMAC supports SMPCHN */
		channel64_writel(dc, CCR, dc->ccr);
		/* Writing a non zero value to CHAR will assert XFACT */
		channel64_write_CHAR(dc, first->txd.phys);
	} else {
		channel32_writel(dc, CNTR, 0);
		channel32_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 4;
			dai = 4;
		}
		channel32_writel(dc, SAIR, sai);
		channel32_writel(dc, DAIR, dai);
		if (txx9_dma_have_SMPCHN()) {
			channel32_writel(dc, CCR, dc->ccr);
			/* Writing a non zero value to CHAR will assert XFACT */
			channel32_writel(dc, CHAR, first->txd.phys);
		} else {
			channel32_writel(dc, CHAR, first->txd.phys);
			channel32_writel(dc, CCR, dc->ccr);
		}
	}
}

/*----------------------------------------------------------------------*/

static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *desc)
{
	struct dmaengine_desc_callback cb;
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
		 txd->cookie, desc);

	dma_cookie_complete(txd);
	dmaengine_desc_get_callback(txd, &cb);

	txx9dmac_sync_desc_for_cpu(dc, desc);
	list_splice_init(&desc->tx_list, &dc->free_list);
	list_move(&desc->desc_node, &dc->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
	dma_run_dependencies(txd);
}

static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *prev = NULL;

	BUG_ON(!list_empty(list));
	do {
		desc = txx9dmac_first_queued(dc);
		if (prev) {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
				prev->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
		}
		prev = txx9dmac_last_child(desc);
		list_move_tail(&desc->desc_node, list);
		/* Make chain-completion interrupt happen */
		if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
		    !txx9dmac_chan_INTENT(dc))
			break;
	} while (!list_empty(&dc->queue));
}

static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dc->active_list, &list);
	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}

static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
			       struct txx9dmac_hwdesc *desc)
{
	if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
			 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
	} else {
		struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR,
			 d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
	}
}

static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}

static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}
	/* For dynamic chain, we should look at XFACT instead of NCHNC */
	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* last descriptor of this chain */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}

static void txx9dmac_chan_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	dc = (struct txx9dmac_chan *)data;
	csr = channel_readl(dc, CSR);
	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

	spin_lock(&dc->lock);
	if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
		   TXX9_DMA_CSR_NTRNFC))
		txx9dmac_scan_descriptors(dc);
	spin_unlock(&dc->lock);
	irq = dc->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
		 channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static void txx9dmac_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
	u32 mcr;
	int i;

	mcr = dma_readl(ddev, MCR);
	dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
	for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
		if ((mcr >> (24 + i)) & 0x11) {
			dc = ddev->chan[i];
			csr = channel_readl(dc, CSR);
			dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
				 csr);
			spin_lock(&dc->lock);
			if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
				   TXX9_DMA_CSR_NTRNFC))
				txx9dmac_scan_descriptors(dc);
			spin_unlock(&dc->lock);
		}
	}
	irq = ddev->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
		 dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}

static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
		 * ERT-TX49H4-016 (slightly conservative)
		 */
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

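		/*
		 * Worked example of the clamp above (illustrative, not
		 * from the original source): a 0x1fa-byte chunk on the
		 * 64-bit DMAC has its low byte in the 0xfa-0xff range,
		 * so it is trimmed to 0x1da bytes here; the loop picks
		 * up the remaining 0x20 bytes in the next descriptor.
		 */
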
		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit. If we put all descriptors on active_list,
		 * calling of callback on the completion will be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}

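/*
 * Illustrative sketch (not part of the original driver): how a client
 * could push a copy through this channel via the generic dmaengine API.
 * Function and variable names here are hypothetical; the block is kept
 * under #if 0 so it does not affect the build.
 */
#if 0
static int txx9dmac_memcpy_example(struct dma_chan *chan,
				   dma_addr_t dest, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* Builds the descriptor chain via txx9dmac_prep_dma_memcpy() */
	txd = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						   DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;
	cookie = dmaengine_submit(txd);		/* txx9dmac_tx_submit() */
	if (dma_submit_error(cookie))
		return -EIO;
	dma_async_issue_pending(chan);		/* txx9dmac_issue_pending() */
	return 0;
}
#endif
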
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *prev;
	struct txx9dmac_desc *first;
	unsigned int i;
	struct scatterlist *sg;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	BUG_ON(!ds || !ds->reg_width);
	if (ds->tx_reg)
		BUG_ON(direction != DMA_MEM_TO_DEV);
	else
		BUG_ON(direction != DMA_DEV_TO_MEM);
	if (unlikely(!sg_len))
		return NULL;

	prev = first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct txx9dmac_desc *desc;
		dma_addr_t mem;
		u32 sai, dai;

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		mem = sg_dma_address(sg);

		if (__is_dmac64(ddev)) {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc.SAR = mem;
				desc->hwdesc.DAR = ds->tx_reg;
			} else {
				desc->hwdesc.SAR = ds->rx_reg;
				desc->hwdesc.DAR = mem;
			}
			desc->hwdesc.CNTR = sg_dma_len(sg);
		} else {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc32.SAR = mem;
				desc->hwdesc32.DAR = ds->tx_reg;
			} else {
				desc->hwdesc32.SAR = ds->rx_reg;
				desc->hwdesc32.DAR = mem;
			}
			desc->hwdesc32.CNTR = sg_dma_len(sg);
		}
		if (direction == DMA_MEM_TO_DEV) {
			sai = ds->reg_width;
			dai = 0;
		} else {
			sai = 0;
			dai = ds->reg_width;
		}
		txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
					dc->ccr | TXX9_DMA_CCR_XFACT);

		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys,
					ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = 0;

	return &first->txd;
}

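/*
 * Illustrative sketch (not part of the original driver): this channel
 * learns about the peripheral FIFO from a struct txx9dmac_slave hung on
 * chan->private before channel resources are allocated, the same pattern
 * platform users of this controller follow. The names and the FIFO
 * address below are hypothetical; the block is kept under #if 0 so it
 * does not affect the build.
 */
#if 0
static struct txx9dmac_slave example_slave = {
	.tx_reg		= 0x1f0f0000,	/* hypothetical device FIFO address */
	.reg_width	= 4,		/* 32-bit FIFO register */
};

static bool example_filter(struct dma_chan *chan, void *param)
{
	chan->private = param;	/* read by txx9dmac_alloc_chan_resources() */
	return true;
}

static struct dma_chan *example_request_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_filter, &example_slave);
}
#endif
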
static int txx9dmac_terminate_all(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "terminate_all\n");
	spin_lock_bh(&dc->lock);

	txx9dmac_reset_chan(dc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dc->queue, &list);
	list_splice_init(&dc->active_list, &list);

	spin_unlock_bh(&dc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);

	return 0;
}

static enum dma_status
txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return DMA_COMPLETE;

	spin_lock_bh(&dc->lock);
	txx9dmac_scan_descriptors(dc);
	spin_unlock_bh(&dc->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

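/*
 * Illustrative sketch (not part of the original driver): busy-waiting on
 * a cookie through the standard dmaengine helper. Because each poll lands
 * in txx9dmac_tx_status(), which re-scans the descriptor lists, progress
 * is observed even without interrupts. Hypothetical code, kept under
 * #if 0 so it does not affect the build.
 */
#if 0
static void example_wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
	       != DMA_COMPLETE)
		cpu_relax();
}
#endif
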
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
				   struct txx9dmac_desc *prev)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	LIST_HEAD(list);

	prev = txx9dmac_last_child(prev);
	txx9dmac_dequeue(dc, &list);
	desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
	desc_write_CHAR(dc, prev, desc->txd.phys);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);
	if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
	    channel_read_CHAR(dc) == prev->txd.phys)
		/* Restart chain DMA */
		channel_write_CHAR(dc, desc->txd.phys);
	list_splice_tail(&list, &dc->active_list);
}

static void txx9dmac_issue_pending(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

	spin_lock_bh(&dc->lock);

	if (!list_empty(&dc->active_list))
		txx9dmac_scan_descriptors(dc);
	if (!list_empty(&dc->queue)) {
		if (list_empty(&dc->active_list)) {
			txx9dmac_dequeue(dc, &dc->active_list);
			txx9dmac_dostart(dc, txx9dmac_first_active(dc));
		} else if (txx9_dma_have_SMPCHN()) {
			struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

			if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
			    txx9dmac_chan_INTENT(dc))
				txx9dmac_chain_dynamic(dc, prev);
		}
	}

	spin_unlock_bh(&dc->lock);
}

static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *desc;
	int i;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
	txx9dmac_chan_set_SMPCHN(dc);
	if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
		dc->ccr |= TXX9_DMA_CCR_INTENC;
	if (chan->device->device_prep_dma_memcpy) {
		if (ds)
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
	} else {
		if (!ds ||
		    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_EXTRQ |
			TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
		txx9dmac_chan_set_INTENT(dc);
	}

	spin_lock_bh(&dc->lock);
	i = dc->descs_allocated;
	while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
		spin_unlock_bh(&dc->lock);

		desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dc->lock);
			break;
		}
		txx9dmac_desc_put(dc, desc);

		spin_lock_bh(&dc->lock);
		i = ++dc->descs_allocated;
	}
	spin_unlock_bh(&dc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
		dc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dc->active_list));
	BUG_ON(!list_empty(&dc->queue));
	BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

	spin_lock_bh(&dc->lock);
	list_splice_init(&dc->free_list, &list);
	dc->descs_allocated = 0;
	spin_unlock_bh(&dc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				 ddev->descsize, DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/*----------------------------------------------------------------------*/

static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
	dma_writel(ddev, MCR, 0);
}

static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
	struct txx9dmac_chan_platform_data *cpdata =
			dev_get_platdata(&pdev->dev);
	struct platform_device *dmac_dev = cpdata->dmac_dev;
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
	struct txx9dmac_chan *dc;
	int err;
	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
	int irq;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->dma.dev = &pdev->dev;
	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
	dc->dma.device_terminate_all = txx9dmac_terminate_all;
	dc->dma.device_tx_status = txx9dmac_tx_status;
	dc->dma.device_issue_pending = txx9dmac_issue_pending;
	if (pdata && pdata->memcpy_chan == ch) {
		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
		dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
	} else {
		dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
		dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
		dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
	}

	INIT_LIST_HEAD(&dc->dma.channels);
	dc->ddev = platform_get_drvdata(dmac_dev);
	if (dc->ddev->irq < 0) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
				(unsigned long)dc);
		dc->irq = irq;
		err = devm_request_irq(&pdev->dev, dc->irq,
			txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
		if (err)
			return err;
	} else
		dc->irq = -1;
	dc->ddev->chan[ch] = dc;
	dc->chan.device = &dc->dma;
	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
	dma_cookie_init(&dc->chan);

	if (is_dmac64(dc))
		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
	else
		dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
	spin_lock_init(&dc->lock);

	INIT_LIST_HEAD(&dc->active_list);
	INIT_LIST_HEAD(&dc->queue);
	INIT_LIST_HEAD(&dc->free_list);

	txx9dmac_reset_chan(dc);

	platform_set_drvdata(pdev, dc);

	err = dma_async_device_register(&dc->dma);
	if (err)
		return err;
	dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
		dc->dma.dev_id,
		dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
		dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

	return 0;
}

static int txx9dmac_chan_remove(struct platform_device *pdev)
{
	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dc->dma);
	if (dc->irq >= 0) {
		devm_free_irq(&pdev->dev, dc->irq, dc);
		tasklet_kill(&dc->tasklet);
	}
	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
	return 0;
}

static int __init txx9dmac_probe(struct platform_device *pdev)
{
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *io;
	struct txx9dmac_dev *ddev;
	u32 mcr;
	int err;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
				     dev_name(&pdev->dev)))
		return -EBUSY;

	ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
	if (!ddev->regs)
		return -ENOMEM;
	ddev->have_64bit_regs = pdata->have_64bit_regs;
	if (__is_dmac64(ddev))
		ddev->descsize = sizeof(struct txx9dmac_hwdesc);
	else
		ddev->descsize = sizeof(struct txx9dmac_hwdesc32);

	/* force dma off, just in case */
	txx9dmac_off(ddev);

	ddev->irq = platform_get_irq(pdev, 0);
	if (ddev->irq >= 0) {
		tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
				(unsigned long)ddev);
		err = devm_request_irq(&pdev->dev, ddev->irq,
			txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
		if (err)
			return err;
	}

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);

	platform_set_drvdata(pdev, ddev);

	return 0;
}

static int txx9dmac_remove(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	if (ddev->irq >= 0) {
		devm_free_irq(&pdev->dev, ddev->irq, ddev);
		tasklet_kill(&ddev->tasklet);
	}
	return 0;
}

static void txx9dmac_shutdown(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
}

static int txx9dmac_suspend_noirq(struct device *dev)
{
	struct txx9dmac_dev *ddev = dev_get_drvdata(dev);

	txx9dmac_off(ddev);
	return 0;
}

static int txx9dmac_resume_noirq(struct device *dev)
{
	struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
	struct txx9dmac_platform_data *pdata = dev_get_platdata(dev);
	u32 mcr;

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);
	return 0;
}

static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
	.suspend_noirq = txx9dmac_suspend_noirq,
	.resume_noirq = txx9dmac_resume_noirq,
};

static struct platform_driver txx9dmac_chan_driver = {
	.remove		= txx9dmac_chan_remove,
	.driver = {
		.name	= "txx9dmac-chan",
	},
};

static struct platform_driver txx9dmac_driver = {
	.remove		= txx9dmac_remove,
	.shutdown	= txx9dmac_shutdown,
	.driver = {
		.name	= "txx9dmac",
		.pm	= &txx9dmac_dev_pm_ops,
	},
};

static int __init txx9dmac_init(void)
{
	int rc;

	rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
	if (!rc) {
		rc = platform_driver_probe(&txx9dmac_chan_driver,
					   txx9dmac_chan_probe);
		if (rc)
			platform_driver_unregister(&txx9dmac_driver);
	}
	return rc;
}
module_init(txx9dmac_init);

static void __exit txx9dmac_exit(void)
{
	platform_driver_unregister(&txx9dmac_chan_driver);
	platform_driver_unregister(&txx9dmac_driver);
}
module_exit(txx9dmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 DMA Controller driver");
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_ALIAS("platform:txx9dmac");
MODULE_ALIAS("platform:txx9dmac-chan");