1 // SPDX-License-Identifier: GPL-2.0
/*
 * Renesas R-Car Gen2/Gen3 DMA Controller Driver
 *
 * Copyright (C) 2014-2019 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */
10 #include <linux/delay.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmaengine.h>
13 #include <linux/interrupt.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
18 #include <linux/of_dma.h>
19 #include <linux/of_platform.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
25 #include "../dmaengine.h"
/**
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
29 * @node: entry in the parent's chunks list
30 * @src_addr: device source address
31 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t size;
};
/**
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
44 * @sar: value of the SAR register (source address)
45 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));
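/*
 * Note: in descriptor mode the DMAC fetches an array of these hardware
 * descriptors by itself (see RCAR_DMADPBASE and rcar_dmac_fill_hwdesc()
 * below), which is why the layout mirrors the SAR/DAR/TCR registers, is
 * packed, and is allocated with dma_alloc_coherent() instead of coming from
 * the descriptor pages used for the software bookkeeping structures.
 */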
/**
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
57 * @async_tx: base DMA asynchronous transaction descriptor
58 * @direction: direction of the DMA transfer
59 * @xfer_shift: log2 of the transfer size
60 * @chcr: value of the channel configuration register for this transfer
61 * @node: entry in the channel's descriptors lists
62 * @chunks: list of transfer chunks for this transfer
63 * @running: the transfer chunk being currently processed
64 * @nchunks: number of transfer chunks for this transfer
65 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
66 * @hwdescs.mem: hardware descriptors memory for the transfer
67 * @hwdescs.dma: device address of the hardware descriptors memory
68 * @hwdescs.size: size of the hardware descriptors in bytes
69 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};
94 #define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
/**
 * struct rcar_dmac_desc_page - One page worth of descriptors
98 * @node: entry in the channel's pages list
99 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};
111 #define RCAR_DMAC_DESCS_PER_PAGE \
112 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
113 sizeof(struct rcar_dmac_desc))
114 #define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \
115 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
116 sizeof(struct rcar_dmac_xfer_chunk))
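/*
 * Note: descs[] and chunks[] overlay each other in the union above, so a given
 * page holds either an array of transfer descriptors or an array of transfer
 * chunks, never both. The two macros above compute how many entries of each
 * kind fit in one zeroed page (see rcar_dmac_desc_alloc() and
 * rcar_dmac_xfer_chunk_alloc()).
 */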
/**
 * struct rcar_dmac_chan_slave - Slave configuration
120 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};
/**
 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
130 * @addr: slave dma address
131 * @dir: direction of mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};
/**
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
142 * @chan: base DMA channel object
143 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
146 * @src: slave memory address and size on the source side
147 * @dst: slave memory address and size on the destination side
148 * @mid_rid: hardware MID/RID for the DMA client using this channel
149 * @lock: protects the channel CHCR register and the desc members
150 * @desc.free: list of free descriptors
151 * @desc.pending: list of pending descriptors (submitted with tx_submit)
152 * @desc.active: list of active descriptors (activated with issue_pending)
153 * @desc.done: list of completed descriptors
154 * @desc.wait: list of descriptors waiting for an ack
155 * @desc.running: the descriptor being processed (a member of the active list)
156 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;

	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};
186 #define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan)
/**
 * struct rcar_dmac - R-Car Gen2 DMA Controller
190 * @engine: base DMA engine object
191 * @dev: the hardware device
192 * @iomem: remapped I/O memory base
193 * @n_channels: number of available channels
194 * @channels: array of DMAC channels
195 * @channels_mask: bitfield of which DMA channels are managed by this driver
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;
	u32 channels_mask;

	DECLARE_BITMAP(modules, 256);
};
210 #define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
212 #define for_each_rcar_dmac_chan(i, dmac, chan) \
213 for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++) \
214 if (!((dmac)->channels_mask & BIT(i))) continue; else
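/*
 * The if/continue/else construct above makes the macro silently skip channels
 * that are not set in channels_mask while still behaving like a plain for
 * statement at the call site, e.g. (illustrative, do_something() is a
 * placeholder):
 *
 *	for_each_rcar_dmac_chan(i, dmac, chan)
 *		do_something(chan);
 */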
/**
 * struct rcar_dmac_of_data - This driver's OF data
218 * @chan_offset_base: DMAC channels base offset
 * @chan_offset_stride: DMAC channels offset stride
 */
struct rcar_dmac_of_data {
	u32 chan_offset_base;
	u32 chan_offset_stride;
};
/* -----------------------------------------------------------------------------
 * Registers
 */
230 #define RCAR_DMAISTA 0x0020
231 #define RCAR_DMASEC 0x0030
232 #define RCAR_DMAOR 0x0060
233 #define RCAR_DMAOR_PRI_FIXED (0 << 8)
234 #define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
235 #define RCAR_DMAOR_AE (1 << 2)
236 #define RCAR_DMAOR_DME (1 << 0)
237 #define RCAR_DMACHCLR 0x0080
238 #define RCAR_DMADPSEC 0x00a0
240 #define RCAR_DMASAR 0x0000
241 #define RCAR_DMADAR 0x0004
242 #define RCAR_DMATCR 0x0008
243 #define RCAR_DMATCR_MASK 0x00ffffff
244 #define RCAR_DMATSR 0x0028
245 #define RCAR_DMACHCR 0x000c
246 #define RCAR_DMACHCR_CAE (1 << 31)
247 #define RCAR_DMACHCR_CAIE (1 << 30)
248 #define RCAR_DMACHCR_DPM_DISABLED (0 << 28)
249 #define RCAR_DMACHCR_DPM_ENABLED (1 << 28)
250 #define RCAR_DMACHCR_DPM_REPEAT (2 << 28)
251 #define RCAR_DMACHCR_DPM_INFINITE (3 << 28)
252 #define RCAR_DMACHCR_RPT_SAR (1 << 27)
253 #define RCAR_DMACHCR_RPT_DAR (1 << 26)
254 #define RCAR_DMACHCR_RPT_TCR (1 << 25)
255 #define RCAR_DMACHCR_DPB (1 << 22)
256 #define RCAR_DMACHCR_DSE (1 << 19)
257 #define RCAR_DMACHCR_DSIE (1 << 18)
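/*
 * Note: the CHCR transfer size (TS) field is split in two parts, a low part
 * shifted by 3 and a high part shifted by 20, which is why each
 * RCAR_DMACHCR_TS_* value below ORs two components together. The value is
 * selected by indexing chcr_ts[] with the log2 of the transfer size, see
 * rcar_dmac_chan_configure_desc().
 */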
258 #define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3))
259 #define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3))
260 #define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3))
261 #define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3))
262 #define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3))
263 #define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3))
264 #define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3))
265 #define RCAR_DMACHCR_DM_FIXED (0 << 14)
266 #define RCAR_DMACHCR_DM_INC (1 << 14)
267 #define RCAR_DMACHCR_DM_DEC (2 << 14)
268 #define RCAR_DMACHCR_SM_FIXED (0 << 12)
269 #define RCAR_DMACHCR_SM_INC (1 << 12)
270 #define RCAR_DMACHCR_SM_DEC (2 << 12)
271 #define RCAR_DMACHCR_RS_AUTO (4 << 8)
272 #define RCAR_DMACHCR_RS_DMARS (8 << 8)
273 #define RCAR_DMACHCR_IE (1 << 2)
274 #define RCAR_DMACHCR_TE (1 << 1)
275 #define RCAR_DMACHCR_DE (1 << 0)
276 #define RCAR_DMATCRB 0x0018
277 #define RCAR_DMATSRB 0x0038
278 #define RCAR_DMACHCRB 0x001c
279 #define RCAR_DMACHCRB_DCNT(n) ((n) << 24)
280 #define RCAR_DMACHCRB_DPTR_MASK (0xff << 16)
281 #define RCAR_DMACHCRB_DPTR_SHIFT 16
282 #define RCAR_DMACHCRB_DRST (1 << 15)
283 #define RCAR_DMACHCRB_DTS (1 << 8)
284 #define RCAR_DMACHCRB_SLM_NORMAL (0 << 4)
285 #define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4)
286 #define RCAR_DMACHCRB_PRI(n) ((n) << 0)
287 #define RCAR_DMARS 0x0040
288 #define RCAR_DMABUFCR 0x0048
289 #define RCAR_DMABUFCR_MBU(n) ((n) << 16)
290 #define RCAR_DMABUFCR_ULB(n) ((n) << 0)
291 #define RCAR_DMADPBASE 0x0050
292 #define RCAR_DMADPBASE_MASK 0xfffffff0
293 #define RCAR_DMADPBASE_SEL (1 << 0)
294 #define RCAR_DMADPCR 0x0054
295 #define RCAR_DMADPCR_DIPT(n) ((n) << 24)
296 #define RCAR_DMAFIXSAR 0x0010
297 #define RCAR_DMAFIXDAR 0x0014
298 #define RCAR_DMAFIXDPBASE 0x0060
300 /* Hardcode the MEMCPY transfer size to 4 bytes. */
301 #define RCAR_DMAC_MEMCPY_XFER_SIZE 4
/* -----------------------------------------------------------------------------
 * Device access
 */
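/*
 * Note: DMAOR (controller-global) and DMARS (per-channel) are 16-bit registers
 * on this controller, hence the readw()/writew() special cases below; all
 * other registers are accessed as 32-bit quantities.
 */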
307 static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
309 if (reg == RCAR_DMAOR)
310 writew(data, dmac->iomem + reg);
312 writel(data, dmac->iomem + reg);
315 static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
317 if (reg == RCAR_DMAOR)
318 return readw(dmac->iomem + reg);
320 return readl(dmac->iomem + reg);
323 static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
325 if (reg == RCAR_DMARS)
326 return readw(chan->iomem + reg);
328 return readl(chan->iomem + reg);
331 static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
333 if (reg == RCAR_DMARS)
334 writew(data, chan->iomem + reg);
336 writel(data, chan->iomem + reg);
339 static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
340 struct rcar_dmac_chan *chan)
342 rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
345 static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
347 rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
}

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */
354 static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
356 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
358 return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
361 static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
363 struct rcar_dmac_desc *desc = chan->desc.running;
364 u32 chcr = desc->chcr;
366 WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
368 if (chan->mid_rid >= 0)
369 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
371 if (desc->hwdescs.use) {
372 struct rcar_dmac_xfer_chunk *chunk =
373 list_first_entry(&desc->chunks,
374 struct rcar_dmac_xfer_chunk, node);
376 dev_dbg(chan->chan.device->dev,
377 "chan%u: queue desc %p: %u@%pad\n",
378 chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
380 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
381 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
382 chunk->src_addr >> 32);
383 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
384 chunk->dst_addr >> 32);
385 rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
386 desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at beginning of transfer by the DMAC like it
		 * should. Initialize it manually with the destination address
		 * of the first chunk.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the end
		 * of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
411 chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
412 | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupts.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
433 struct rcar_dmac_xfer_chunk *chunk = desc->running;
435 dev_dbg(chan->chan.device->dev,
436 "chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

440 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
441 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
442 chunk->src_addr >> 32);
443 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
444 chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
448 rcar_dmac_chan_write(chan, RCAR_DMADAR,
449 chunk->dst_addr & 0xffffffff);
450 rcar_dmac_chan_write(chan, RCAR_DMATCR,
451 chunk->size >> desc->xfer_shift);
		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR,
			     chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
}
460 static int rcar_dmac_init(struct rcar_dmac *dmac)
464 /* Clear all channels and enable the DMAC globally. */
465 rcar_dmac_chan_clear_all(dmac);
466 rcar_dmac_write(dmac, RCAR_DMAOR,
467 RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
469 dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
470 if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
471 dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */
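/*
 * Note: as usual for dmaengine drivers, tx_submit below only assigns a cookie
 * and queues the descriptor on the pending list under the channel lock; the
 * hardware is only programmed later, when rcar_dmac_issue_pending() moves the
 * pending descriptors to the active list and starts the first transfer.
 */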
482 static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
484 struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
485 struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
489 spin_lock_irqsave(&chan->lock, flags);
491 cookie = dma_cookie_assign(tx);
493 dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
494 chan->index, tx->cookie, desc);
496 list_add_tail(&desc->node, &chan->desc.pending);
497 desc->running = list_first_entry(&desc->chunks,
498 struct rcar_dmac_xfer_chunk, node);
500 spin_unlock_irqrestore(&chan->lock, flags);
	return cookie;
}

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */
/**
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
514 static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
516 struct rcar_dmac_desc_page *page;
521 page = (void *)get_zeroed_page(gfp);
525 for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
526 struct rcar_dmac_desc *desc = &page->descs[i];
528 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
529 desc->async_tx.tx_submit = rcar_dmac_tx_submit;
530 INIT_LIST_HEAD(&desc->chunks);
532 list_add_tail(&desc->node, &list);
535 spin_lock_irqsave(&chan->lock, flags);
536 list_splice_tail(&list, &chan->desc.free);
537 list_add_tail(&page->node, &chan->desc.pages);
538 spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/**
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptors lists. The descriptor's chunks list will be reinitialized to
 * an empty list as a result.
 *
 * The descriptor must have been removed from the channel's lists before calling
 * this function.
 */
555 static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
556 struct rcar_dmac_desc *desc)
560 spin_lock_irqsave(&chan->lock, flags);
561 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
562 list_add(&desc->node, &chan->desc.free);
563 spin_unlock_irqrestore(&chan->lock, flags);
566 static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
568 struct rcar_dmac_desc *desc, *_desc;
573 * We have to temporarily move all descriptors from the wait list to a
574 * local list as iterating over the wait list, even with
575 * list_for_each_entry_safe, isn't safe if we release the channel lock
576 * around the rcar_dmac_desc_put() call.
578 spin_lock_irqsave(&chan->lock, flags);
579 list_splice_init(&chan->desc.wait, &list);
580 spin_unlock_irqrestore(&chan->lock, flags);
582 list_for_each_entry_safe(desc, _desc, &list, node) {
583 if (async_tx_test_ack(&desc->async_tx)) {
584 list_del(&desc->node);
585 rcar_dmac_desc_put(chan, desc);
589 if (list_empty(&list))
592 /* Put the remaining descriptors back in the wait list. */
593 spin_lock_irqsave(&chan->lock, flags);
594 list_splice(&list, &chan->desc.wait);
595 spin_unlock_irqrestore(&chan->lock, flags);
/**
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
607 static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
609 struct rcar_dmac_desc *desc;
613 /* Recycle acked descriptors before attempting allocation. */
614 rcar_dmac_desc_recycle_acked(chan);
616 spin_lock_irqsave(&chan->lock, flags);
618 while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
625 spin_unlock_irqrestore(&chan->lock, flags);
626 ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
629 spin_lock_irqsave(&chan->lock, flags);
632 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
633 list_del(&desc->node);
635 spin_unlock_irqrestore(&chan->lock, flags);
/**
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
645 static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
647 struct rcar_dmac_desc_page *page;
652 page = (void *)get_zeroed_page(gfp);
656 for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
657 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
659 list_add_tail(&chunk->node, &list);
662 spin_lock_irqsave(&chan->lock, flags);
663 list_splice_tail(&list, &chan->desc.chunks_free);
664 list_add_tail(&page->node, &chan->desc.pages);
665 spin_unlock_irqrestore(&chan->lock, flags);
/**
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
679 static struct rcar_dmac_xfer_chunk *
680 rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
682 struct rcar_dmac_xfer_chunk *chunk;
686 spin_lock_irqsave(&chan->lock, flags);
688 while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
695 spin_unlock_irqrestore(&chan->lock, flags);
696 ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
699 spin_lock_irqsave(&chan->lock, flags);
702 chunk = list_first_entry(&chan->desc.chunks_free,
703 struct rcar_dmac_xfer_chunk, node);
704 list_del(&chunk->node);
706 spin_unlock_irqrestore(&chan->lock, flags);
711 static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
712 struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);
722 if (desc->hwdescs.size == size)
725 if (desc->hwdescs.mem) {
726 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
727 desc->hwdescs.mem, desc->hwdescs.dma);
728 desc->hwdescs.mem = NULL;
729 desc->hwdescs.size = 0;
735 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
736 &desc->hwdescs.dma, GFP_NOWAIT);
737 if (!desc->hwdescs.mem)
740 desc->hwdescs.size = size;
743 static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
744 struct rcar_dmac_desc *desc)
746 struct rcar_dmac_xfer_chunk *chunk;
747 struct rcar_dmac_hw_desc *hwdesc;
749 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
751 hwdesc = desc->hwdescs.mem;
755 list_for_each_entry(chunk, &desc->chunks, node) {
756 hwdesc->sar = chunk->src_addr;
757 hwdesc->dar = chunk->dst_addr;
758 hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Stream management
 */
768 static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
{
	u32 chcr;
	unsigned int i;

	/*
	 * Ensure that the setting of the DE bit is actually 0 after
	 * clearing it.
	 */
777 for (i = 0; i < 1024; i++) {
778 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
779 if (!(chcr & RCAR_DMACHCR_DE))
			return;
		udelay(1);
	}

	dev_err(chan->chan.device->dev, "CHCR DE check error\n");
}
787 static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
789 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
791 /* set DE=0 and flush remaining data */
792 rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
794 /* make sure all remaining data was flushed */
795 rcar_dmac_chcr_de_barrier(chan);
798 static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
800 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
802 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
803 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
804 RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
805 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
806 rcar_dmac_chcr_de_barrier(chan);
809 static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
811 struct rcar_dmac_desc *desc, *_desc;
815 spin_lock_irqsave(&chan->lock, flags);
817 /* Move all non-free descriptors to the local lists. */
818 list_splice_init(&chan->desc.pending, &descs);
819 list_splice_init(&chan->desc.active, &descs);
820 list_splice_init(&chan->desc.done, &descs);
821 list_splice_init(&chan->desc.wait, &descs);
823 chan->desc.running = NULL;
825 spin_unlock_irqrestore(&chan->lock, flags);
827 list_for_each_entry_safe(desc, _desc, &descs, node) {
828 list_del(&desc->node);
829 rcar_dmac_desc_put(chan, desc);
833 static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
835 struct rcar_dmac_chan *chan;
838 /* Stop all channels. */
839 for_each_rcar_dmac_chan(i, dmac, chan) {
840 /* Stop and reinitialize the channel. */
841 spin_lock_irq(&chan->lock);
842 rcar_dmac_chan_halt(chan);
843 spin_unlock_irq(&chan->lock);
847 static int rcar_dmac_chan_pause(struct dma_chan *chan)
850 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
852 spin_lock_irqsave(&rchan->lock, flags);
853 rcar_dmac_clear_chcr_de(rchan);
854 spin_unlock_irqrestore(&rchan->lock, flags);
	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */
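/*
 * Note: xfer_shift holds the log2 of the per-access transfer size. It is used
 * both as the index into chcr_ts[] below to pick the CHCR TS encoding and to
 * convert between byte counts and TCR transfer counts (size >> xfer_shift);
 * for example 4-byte accesses give xfer_shift = 2 and RCAR_DMACHCR_TS_4B.
 */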
863 static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
864 struct rcar_dmac_desc *desc)
866 static const u32 chcr_ts[] = {
867 RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
868 RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
869 RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
873 unsigned int xfer_size;
876 switch (desc->direction) {
878 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
879 | RCAR_DMACHCR_RS_DMARS;
880 xfer_size = chan->src.xfer_size;
884 chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
885 | RCAR_DMACHCR_RS_DMARS;
886 xfer_size = chan->dst.xfer_size;
891 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
892 | RCAR_DMACHCR_RS_AUTO;
893 xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
897 desc->xfer_shift = ilog2(xfer_size);
898 desc->chcr = chcr | chcr_ts[desc->xfer_shift];
/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element which points at
 * the source buffer.
 */
911 static struct dma_async_tx_descriptor *
912 rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
913 unsigned int sg_len, dma_addr_t dev_addr,
914 enum dma_transfer_direction dir, unsigned long dma_flags,
917 struct rcar_dmac_xfer_chunk *chunk;
918 struct rcar_dmac_desc *desc;
919 struct scatterlist *sg;
920 unsigned int nchunks = 0;
921 unsigned int max_chunk_size;
922 unsigned int full_size = 0;
923 bool cross_boundary = false;
925 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
930 desc = rcar_dmac_desc_get(chan);
934 desc->async_tx.flags = dma_flags;
935 desc->async_tx.cookie = -EBUSY;
937 desc->cyclic = cyclic;
938 desc->direction = dir;
940 rcar_dmac_chan_configure_desc(chan, desc);
942 max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
945 * Allocate and fill the transfer chunk descriptors. We own the only
946 * reference to the DMA descriptor, there's no need for locking.
948 for_each_sg(sgl, sg, sg_len, i) {
949 dma_addr_t mem_addr = sg_dma_address(sg);
950 unsigned int len = sg_dma_len(sg);
954 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
956 high_dev_addr = dev_addr >> 32;
957 high_mem_addr = mem_addr >> 32;
960 if ((dev_addr >> 32 != high_dev_addr) ||
961 (mem_addr >> 32 != high_mem_addr))
962 cross_boundary = true;
965 unsigned int size = min(len, max_chunk_size);
967 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
969 * Prevent individual transfers from crossing 4GB
972 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
973 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
974 cross_boundary = true;
976 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
977 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
978 cross_boundary = true;
982 chunk = rcar_dmac_xfer_chunk_get(chan);
984 rcar_dmac_desc_put(chan, desc);
988 if (dir == DMA_DEV_TO_MEM) {
989 chunk->src_addr = dev_addr;
990 chunk->dst_addr = mem_addr;
992 chunk->src_addr = mem_addr;
993 chunk->dst_addr = dev_addr;
998 dev_dbg(chan->chan.device->dev,
999 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
1000 chan->index, chunk, desc, i, sg, size, len,
1001 &chunk->src_addr, &chunk->dst_addr);
1004 if (dir == DMA_MEM_TO_MEM)
1009 list_add_tail(&chunk->node, &desc->chunks);
1014 desc->nchunks = nchunks;
1015 desc->size = full_size;
	/*
	 * Use hardware descriptor lists if possible when more than one chunk
	 * needs to be transferred (otherwise they don't make much sense).
	 *
	 * The source and destination addresses must be located in the same
	 * 4 GiB region of the 40-bit address space to use hardware
	 * descriptors; cross_boundary records whether that constraint is
	 * violated.
	 */
1025 desc->hwdescs.use = !cross_boundary && nchunks > 1;
1026 if (desc->hwdescs.use) {
1027 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
1028 desc->hwdescs.use = false;
1031 return &desc->async_tx;
}

/* -----------------------------------------------------------------------------
 * DMA engine operations
 */
1038 static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
1040 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1043 INIT_LIST_HEAD(&rchan->desc.chunks_free);
1044 INIT_LIST_HEAD(&rchan->desc.pages);
1046 /* Preallocate descriptors. */
1047 ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
1051 ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
1055 return pm_runtime_get_sync(chan->device->dev);
1058 static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
1060 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1061 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
1062 struct rcar_dmac_chan_map *map = &rchan->map;
1063 struct rcar_dmac_desc_page *page, *_page;
1064 struct rcar_dmac_desc *desc;
1067 /* Protect against ISR */
1068 spin_lock_irq(&rchan->lock);
1069 rcar_dmac_chan_halt(rchan);
1070 spin_unlock_irq(&rchan->lock);
1073 * Now no new interrupts will occur, but one might already be
1074 * running. Wait for it to finish before freeing resources.
1076 synchronize_irq(rchan->irq);
1078 if (rchan->mid_rid >= 0) {
1079 /* The caller is holding dma_list_mutex */
1080 clear_bit(rchan->mid_rid, dmac->modules);
1081 rchan->mid_rid = -EINVAL;
1084 list_splice_init(&rchan->desc.free, &list);
1085 list_splice_init(&rchan->desc.pending, &list);
1086 list_splice_init(&rchan->desc.active, &list);
1087 list_splice_init(&rchan->desc.done, &list);
1088 list_splice_init(&rchan->desc.wait, &list);
1090 rchan->desc.running = NULL;
1092 list_for_each_entry(desc, &list, node)
1093 rcar_dmac_realloc_hwdesc(rchan, desc, 0);
1095 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
1096 list_del(&page->node);
1097 free_page((unsigned long)page);
1100 /* Remove slave mapping if present. */
1101 if (map->slave.xfer_size) {
1102 dma_unmap_resource(chan->device->dev, map->addr,
1103 map->slave.xfer_size, map->dir, 0);
1104 map->slave.xfer_size = 0;
1107 pm_runtime_put(chan->device->dev);
1110 static struct dma_async_tx_descriptor *
1111 rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
1112 dma_addr_t dma_src, size_t len, unsigned long flags)
1114 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1115 struct scatterlist sgl;
1120 sg_init_table(&sgl, 1);
1121 sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
1122 offset_in_page(dma_src));
1123 sg_dma_address(&sgl) = dma_src;
1124 sg_dma_len(&sgl) = len;
1126 return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
1127 DMA_MEM_TO_MEM, flags, false);
1130 static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
1131 enum dma_transfer_direction dir)
1133 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1134 struct rcar_dmac_chan_map *map = &rchan->map;
1135 phys_addr_t dev_addr;
1137 enum dma_data_direction dev_dir;
1139 if (dir == DMA_DEV_TO_MEM) {
1140 dev_addr = rchan->src.slave_addr;
1141 dev_size = rchan->src.xfer_size;
1142 dev_dir = DMA_TO_DEVICE;
1144 dev_addr = rchan->dst.slave_addr;
1145 dev_size = rchan->dst.xfer_size;
1146 dev_dir = DMA_FROM_DEVICE;
1149 /* Reuse current map if possible. */
1150 if (dev_addr == map->slave.slave_addr &&
1151 dev_size == map->slave.xfer_size &&
1152 dev_dir == map->dir)
1155 /* Remove old mapping if present. */
1156 if (map->slave.xfer_size)
1157 dma_unmap_resource(chan->device->dev, map->addr,
1158 map->slave.xfer_size, map->dir, 0);
1159 map->slave.xfer_size = 0;
1161 /* Create new slave address map. */
1162 map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
1165 if (dma_mapping_error(chan->device->dev, map->addr)) {
1166 dev_err(chan->device->dev,
1167 "chan%u: failed to map %zx@%pap", rchan->index,
1168 dev_size, &dev_addr);
1172 dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
1173 rchan->index, dev_size, &dev_addr, &map->addr,
1174 dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
1176 map->slave.slave_addr = dev_addr;
1177 map->slave.xfer_size = dev_size;
1183 static struct dma_async_tx_descriptor *
1184 rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1185 unsigned int sg_len, enum dma_transfer_direction dir,
1186 unsigned long flags, void *context)
1188 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1190 /* Someone calling slave DMA on a generic channel? */
1191 if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
1192 dev_warn(chan->device->dev,
1193 "%s: bad parameter: len=%d, id=%d\n",
1194 __func__, sg_len, rchan->mid_rid);
1198 if (rcar_dmac_map_slave_addr(chan, dir))
1201 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
1205 #define RCAR_DMAC_MAX_SG_LEN 32
1207 static struct dma_async_tx_descriptor *
1208 rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
1209 size_t buf_len, size_t period_len,
1210 enum dma_transfer_direction dir, unsigned long flags)
1212 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1213 struct dma_async_tx_descriptor *desc;
1214 struct scatterlist *sgl;
1215 unsigned int sg_len;
1218 /* Someone calling slave DMA on a generic channel? */
1219 if (rchan->mid_rid < 0 || buf_len < period_len) {
1220 dev_warn(chan->device->dev,
1221 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
1222 __func__, buf_len, period_len, rchan->mid_rid);
1226 if (rcar_dmac_map_slave_addr(chan, dir))
1229 sg_len = buf_len / period_len;
1230 if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
1231 dev_err(chan->device->dev,
1232 "chan%u: sg length %d exceeds limit %d",
1233 rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
		return NULL;
	}

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
1241 sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
1245 sg_init_table(sgl, sg_len);
1247 for (i = 0; i < sg_len; ++i) {
1248 dma_addr_t src = buf_addr + (period_len * i);
1250 sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
1251 offset_in_page(src));
1252 sg_dma_address(&sgl[i]) = src;
1253 sg_dma_len(&sgl[i]) = period_len;
1256 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
1263 static int rcar_dmac_device_config(struct dma_chan *chan,
1264 struct dma_slave_config *cfg)
1266 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1269 * We could lock this, but you shouldn't be configuring the
1270 * channel, while using it...
1272 rchan->src.slave_addr = cfg->src_addr;
1273 rchan->dst.slave_addr = cfg->dst_addr;
1274 rchan->src.xfer_size = cfg->src_addr_width;
1275 rchan->dst.xfer_size = cfg->dst_addr_width;
1280 static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
1282 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1283 unsigned long flags;
1285 spin_lock_irqsave(&rchan->lock, flags);
1286 rcar_dmac_chan_halt(rchan);
1287 spin_unlock_irqrestore(&rchan->lock, flags);
	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might still
	 * be running.
	 */
1294 rcar_dmac_chan_reinit(rchan);
1299 static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1300 dma_cookie_t cookie)
1302 struct rcar_dmac_desc *desc = chan->desc.running;
1303 struct rcar_dmac_xfer_chunk *running = NULL;
1304 struct rcar_dmac_xfer_chunk *chunk;
1305 enum dma_status status;
1306 unsigned int residue = 0;
1307 unsigned int dptr = 0;
	/*
	 * If the cookie corresponds to a descriptor that has been completed
	 * there is no residue. The same check has already been performed by the
	 * caller but without holding the channel lock, so the descriptor could
	 * now be complete.
	 */
1321 status = dma_cookie_status(&chan->chan, cookie, NULL);
1322 if (status == DMA_COMPLETE)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size.
	 *
	 * Also, a client driver may call this function before
	 * rcar_dmac_isr_channel_thread() runs. In that case desc.running
	 * already points to the next descriptor and the completed one sits on
	 * the done list, so a cookie that matches a descriptor on the done
	 * list means the residue is zero.
	 */
1335 if (cookie != desc->async_tx.cookie) {
		list_for_each_entry(desc, &chan->desc.done, node) {
			if (cookie == desc->async_tx.cookie)
				return 0;
		}
		list_for_each_entry(desc, &chan->desc.pending, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}
		list_for_each_entry(desc, &chan->desc.active, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}

		/*
		 * No descriptor found for the cookie, there's thus no residue.
		 * This shouldn't happen if the calling driver passes a correct
		 * cookie value.
		 */
		WARN(1, "No descriptor for cookie!");
		return 0;
	}

	/*
	 * We need to read two registers.
	 * Make sure the control register does not skip to next chunk
	 * while reading the counter.
	 * Trying it 3 times should be enough: Initial read, retry, retry
	 * for the final accuracy.
	 */
1365 for (i = 0; i < 3; i++) {
1366 chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1367 RCAR_DMACHCRB_DPTR_MASK;
1368 tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
1369 /* Still the same? */
1370 if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1371 RCAR_DMACHCRB_DPTR_MASK))
1374 WARN_ONCE(i >= 3, "residue might be not continuous!");
1377 * In descriptor mode the descriptor running pointer is not maintained
1378 * by the interrupt handler, find the running descriptor from the
1379 * descriptor pointer field in the CHCRB register. In non-descriptor
1380 * mode just use the running descriptor pointer.
1382 if (desc->hwdescs.use) {
1383 dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
1385 dptr = desc->nchunks;
1387 WARN_ON(dptr >= desc->nchunks);
1389 running = desc->running;
1392 /* Compute the size of all chunks still to be transferred. */
1393 list_for_each_entry_reverse(chunk, &desc->chunks, node) {
1394 if (chunk == running || ++dptr == desc->nchunks)
1397 residue += chunk->size;
1400 /* Add the residue for the current chunk. */
1401 residue += tcrb << desc->xfer_shift;
1406 static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
1407 dma_cookie_t cookie,
1408 struct dma_tx_state *txstate)
1410 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1411 enum dma_status status;
1412 unsigned long flags;
1413 unsigned int residue;
1416 status = dma_cookie_status(chan, cookie, txstate);
1417 if (status == DMA_COMPLETE || !txstate)
1420 spin_lock_irqsave(&rchan->lock, flags);
1421 residue = rcar_dmac_chan_get_residue(rchan, cookie);
1422 cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
1423 spin_unlock_irqrestore(&rchan->lock, flags);
1425 /* if there's no residue, the cookie is complete */
1426 if (!residue && !cyclic)
1427 return DMA_COMPLETE;
1429 dma_set_residue(txstate, residue);
1434 static void rcar_dmac_issue_pending(struct dma_chan *chan)
1436 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1437 unsigned long flags;
1439 spin_lock_irqsave(&rchan->lock, flags);
1441 if (list_empty(&rchan->desc.pending))
1444 /* Append the pending list to the active list. */
1445 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
1448 * If no transfer is running pick the first descriptor from the active
1449 * list and start the transfer.
1451 if (!rchan->desc.running) {
1452 struct rcar_dmac_desc *desc;
1454 desc = list_first_entry(&rchan->desc.active,
1455 struct rcar_dmac_desc, node);
1456 rchan->desc.running = desc;
1458 rcar_dmac_chan_start_xfer(rchan);
1462 spin_unlock_irqrestore(&rchan->lock, flags);
1465 static void rcar_dmac_device_synchronize(struct dma_chan *chan)
1467 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1469 synchronize_irq(rchan->irq);
}

/* -----------------------------------------------------------------------------
 * IRQ handling
 */
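/*
 * Note: interrupt handling is split in two stages. The hard IRQ handler below
 * acknowledges CHCR, advances the running chunk or descriptor and returns
 * IRQ_WAKE_THREAD when client-visible work is pending; the threaded handler
 * then invokes the dmaengine callbacks and recycles acked descriptors,
 * dropping the channel lock around each callback invocation.
 */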
1476 static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
1478 struct rcar_dmac_desc *desc = chan->desc.running;
1481 if (WARN_ON(!desc || !desc->cyclic)) {
1483 * This should never happen, there should always be a running
1484 * cyclic descriptor when a descriptor stage end interrupt is
1485 * triggered. Warn and return.
1490 /* Program the interrupt pointer to the next stage. */
1491 stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1492 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1493 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
1495 return IRQ_WAKE_THREAD;
1498 static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
1500 struct rcar_dmac_desc *desc = chan->desc.running;
1501 irqreturn_t ret = IRQ_WAKE_THREAD;
1503 if (WARN_ON_ONCE(!desc)) {
1505 * This should never happen, there should always be a running
1506 * descriptor when a transfer end interrupt is triggered. Warn
1513 * The transfer end interrupt isn't generated for each chunk when using
1514 * descriptor mode. Only update the running chunk pointer in
1515 * non-descriptor mode.
1517 if (!desc->hwdescs.use) {
		/*
		 * If we haven't completed the last transfer chunk simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
		 * cyclic.
		 */
1523 if (!list_is_last(&desc->running->node, &desc->chunks)) {
			desc->running = list_next_entry(desc->running, node);
			if (!desc->cyclic)
				ret = IRQ_HANDLED;
			goto done;
		}

		/*
		 * We've completed the last transfer chunk. If the transfer is
		 * cyclic, move back to the first one.
		 */
		if (desc->cyclic) {
			desc->running =
				list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
			goto done;
		}
	}
1543 /* The descriptor is complete, move it to the done list. */
1544 list_move_tail(&desc->node, &chan->desc.done);
1546 /* Queue the next descriptor, if any. */
1547 if (!list_empty(&chan->desc.active))
1548 chan->desc.running = list_first_entry(&chan->desc.active,
1549 struct rcar_dmac_desc,
1552 chan->desc.running = NULL;
1555 if (chan->desc.running)
1556 rcar_dmac_chan_start_xfer(chan);
1561 static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1563 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
1564 struct rcar_dmac_chan *chan = dev;
1565 irqreturn_t ret = IRQ_NONE;
1566 bool reinit = false;
1569 spin_lock(&chan->lock);
1571 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
1572 if (chcr & RCAR_DMACHCR_CAE) {
1573 struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);
1576 * We don't need to call rcar_dmac_chan_halt()
1577 * because channel is already stopped in error case.
1578 * We need to clear register and check DE bit as recovery.
1580 rcar_dmac_chan_clear(dmac, chan);
1581 rcar_dmac_chcr_de_barrier(chan);
1586 if (chcr & RCAR_DMACHCR_TE)
1587 mask |= RCAR_DMACHCR_DE;
1588 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
1589 if (mask & RCAR_DMACHCR_DE)
1590 rcar_dmac_chcr_de_barrier(chan);
1592 if (chcr & RCAR_DMACHCR_DSE)
1593 ret |= rcar_dmac_isr_desc_stage_end(chan);
1595 if (chcr & RCAR_DMACHCR_TE)
1596 ret |= rcar_dmac_isr_transfer_end(chan);
1599 spin_unlock(&chan->lock);
1602 dev_err(chan->chan.device->dev, "Channel Address Error\n");
1604 rcar_dmac_chan_reinit(chan);
1611 static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
1613 struct rcar_dmac_chan *chan = dev;
1614 struct rcar_dmac_desc *desc;
1615 struct dmaengine_desc_callback cb;
1617 spin_lock_irq(&chan->lock);
1619 /* For cyclic transfers notify the user after every chunk. */
1620 if (chan->desc.running && chan->desc.running->cyclic) {
1621 desc = chan->desc.running;
1622 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1624 if (dmaengine_desc_callback_valid(&cb)) {
1625 spin_unlock_irq(&chan->lock);
1626 dmaengine_desc_callback_invoke(&cb, NULL);
1627 spin_lock_irq(&chan->lock);
1632 * Call the callback function for all descriptors on the done list and
1633 * move them to the ack wait list.
1635 while (!list_empty(&chan->desc.done)) {
1636 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
1638 dma_cookie_complete(&desc->async_tx);
1639 list_del(&desc->node);
1641 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1642 if (dmaengine_desc_callback_valid(&cb)) {
1643 spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
1649 dmaengine_desc_callback_invoke(&cb, NULL);
1650 spin_lock_irq(&chan->lock);
1653 list_add_tail(&desc->node, &chan->desc.wait);
1656 spin_unlock_irq(&chan->lock);
1658 /* Recycle all acked descriptors. */
1659 rcar_dmac_desc_recycle_acked(chan);
	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */
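/*
 * For reference, DMA clients reference this controller with a single-cell
 * specifier whose value ends up in mid_rid below. An illustrative (not
 * authoritative) device tree fragment, with placeholder labels and example
 * MID/RID values, could look like:
 *
 *	&scif1 {
 *		dmas = <&dmac1 0x53>, <&dmac1 0x52>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * See the renesas,rcar-dmac DT binding for the authoritative documentation.
 */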
1668 static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
1670 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
1671 struct of_phandle_args *dma_spec = arg;
	/*
	 * FIXME: Using a filter on OF platforms makes no sense. The OF xlate
	 * function knows which device it wants to allocate a channel for and
	 * would be perfectly capable of selecting the channel it wants.
	 * Forcing it to call dma_request_channel() and iterate through all
	 * channels from all controllers is just pointless.
	 */
1680 if (chan->device->device_config != rcar_dmac_device_config)
1683 return !test_and_set_bit(dma_spec->args[0], dmac->modules);
1686 static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
1687 struct of_dma *ofdma)
1689 struct rcar_dmac_chan *rchan;
1690 struct dma_chan *chan;
1691 dma_cap_mask_t mask;
1693 if (dma_spec->args_count != 1)
1696 /* Only slave DMA channels can be allocated via DT */
1698 dma_cap_set(DMA_SLAVE, mask);
1700 chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec,
1705 rchan = to_rcar_dmac_chan(chan);
1706 rchan->mid_rid = dma_spec->args[0];
	return chan;
}

/* -----------------------------------------------------------------------------
 * Power management
 */
1716 static int rcar_dmac_runtime_suspend(struct device *dev)
1721 static int rcar_dmac_runtime_resume(struct device *dev)
1723 struct rcar_dmac *dmac = dev_get_drvdata(dev);
1725 return rcar_dmac_init(dmac);
1729 static const struct dev_pm_ops rcar_dmac_pm = {
1731 * TODO for system sleep/resume:
1732 * - Wait for the current transfer to complete and stop the device,
1733 * - Resume transfers, if any.
1735 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1736 pm_runtime_force_resume)
1737 SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
				   NULL)
};

/* -----------------------------------------------------------------------------
 * Probe and remove
 */
1745 static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
1746 struct rcar_dmac_chan *rchan,
1747 const struct rcar_dmac_of_data *data,
1750 struct platform_device *pdev = to_platform_device(dmac->dev);
1751 struct dma_chan *chan = &rchan->chan;
1752 char pdev_irqname[5];
1756 rchan->index = index;
1757 rchan->iomem = dmac->iomem + data->chan_offset_base +
1758 data->chan_offset_stride * index;
1759 rchan->mid_rid = -EINVAL;
1761 spin_lock_init(&rchan->lock);
1763 INIT_LIST_HEAD(&rchan->desc.free);
1764 INIT_LIST_HEAD(&rchan->desc.pending);
1765 INIT_LIST_HEAD(&rchan->desc.active);
1766 INIT_LIST_HEAD(&rchan->desc.done);
1767 INIT_LIST_HEAD(&rchan->desc.wait);
1769 /* Request the channel interrupt. */
1770 sprintf(pdev_irqname, "ch%u", index);
1771 rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
1775 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
1776 dev_name(dmac->dev), index);
1781 * Initialize the DMA engine channel and add it to the DMA engine
1784 chan->device = &dmac->engine;
1785 dma_cookie_init(chan);
1787 list_add_tail(&chan->device_node, &dmac->engine.channels);
1789 ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
1790 rcar_dmac_isr_channel,
1791 rcar_dmac_isr_channel_thread, 0,
1794 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
1802 #define RCAR_DMAC_MAX_CHANNELS 32
1804 static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1806 struct device_node *np = dev->of_node;
1809 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
1811 dev_err(dev, "unable to read dma-channels property\n");
1815 /* The hardware and driver don't support more than 32 bits in CHCLR */
1816 if (dmac->n_channels <= 0 ||
1817 dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
1818 dev_err(dev, "invalid number of channels %u\n",
1824 * If the driver is unable to read dma-channel-mask property,
1825 * the driver assumes that it can use all channels.
1827 dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
1828 of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask);
1830 /* If the property has out-of-channel mask, this driver clears it */
1831 dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0);
1836 static int rcar_dmac_probe(struct platform_device *pdev)
1838 const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
1839 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
1840 DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
1841 DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
1842 const struct rcar_dmac_of_data *data;
1843 struct rcar_dmac_chan *chan;
1844 struct dma_device *engine;
1845 struct rcar_dmac *dmac;
1849 data = of_device_get_match_data(&pdev->dev);
1853 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1857 dmac->dev = &pdev->dev;
1858 platform_set_drvdata(pdev, dmac);
1859 dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
1860 dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
1862 ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	/*
	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from
	 * being flushed correctly, resulting in memory corruption. DMAC 0
	 * channel 0 is connected to microTLB 0 on currently supported
	 * platforms, so we can't use it with the IPMMU. As the IOMMU API
	 * operates at the device level we can't disable it selectively, so
	 * ignore channel 0 for now if the device is part of an IOMMU group.
	 */
1874 if (device_iommu_mapped(&pdev->dev))
1875 dmac->channels_mask &= ~BIT(0);
1877 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1878 sizeof(*dmac->channels), GFP_KERNEL);
1879 if (!dmac->channels)
1882 /* Request resources. */
1883 dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
1884 if (IS_ERR(dmac->iomem))
1885 return PTR_ERR(dmac->iomem);
1887 /* Enable runtime PM and initialize the device. */
1888 pm_runtime_enable(&pdev->dev);
1889 ret = pm_runtime_get_sync(&pdev->dev);
1891 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
1895 ret = rcar_dmac_init(dmac);
1896 pm_runtime_put(&pdev->dev);
1899 dev_err(&pdev->dev, "failed to reset device\n");
1903 /* Initialize engine */
1904 engine = &dmac->engine;
1906 dma_cap_set(DMA_MEMCPY, engine->cap_mask);
1907 dma_cap_set(DMA_SLAVE, engine->cap_mask);
1909 engine->dev = &pdev->dev;
1910 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
1912 engine->src_addr_widths = widths;
1913 engine->dst_addr_widths = widths;
1914 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1915 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1917 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
1918 engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
1919 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
1920 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
1921 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
1922 engine->device_config = rcar_dmac_device_config;
1923 engine->device_pause = rcar_dmac_chan_pause;
1924 engine->device_terminate_all = rcar_dmac_chan_terminate_all;
1925 engine->device_tx_status = rcar_dmac_tx_status;
1926 engine->device_issue_pending = rcar_dmac_issue_pending;
1927 engine->device_synchronize = rcar_dmac_device_synchronize;
1929 INIT_LIST_HEAD(&engine->channels);
1931 for_each_rcar_dmac_chan(i, dmac, chan) {
1932 ret = rcar_dmac_chan_probe(dmac, chan, data, i);
1937 /* Register the DMAC as a DMA provider for DT. */
1938 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
1944 * Register the DMA engine device.
1946 * Default transfer size of 32 bytes requires 32-byte alignment.
1948 ret = dma_async_device_register(engine);
1955 of_dma_controller_free(pdev->dev.of_node);
1956 pm_runtime_disable(&pdev->dev);
1960 static int rcar_dmac_remove(struct platform_device *pdev)
1962 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1964 of_dma_controller_free(pdev->dev.of_node);
1965 dma_async_device_unregister(&dmac->engine);
1967 pm_runtime_disable(&pdev->dev);
1972 static void rcar_dmac_shutdown(struct platform_device *pdev)
1974 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1976 rcar_dmac_stop_all_chan(dmac);
1979 static const struct rcar_dmac_of_data rcar_dmac_data = {
1980 .chan_offset_base = 0x8000,
1981 .chan_offset_stride = 0x80,
1984 static const struct of_device_id rcar_dmac_of_ids[] = {
1986 .compatible = "renesas,rcar-dmac",
1987 .data = &rcar_dmac_data,
1991 MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
1993 static struct platform_driver rcar_dmac_driver = {
1995 .pm = &rcar_dmac_pm,
1996 .name = "rcar-dmac",
1997 .of_match_table = rcar_dmac_of_ids,
1999 .probe = rcar_dmac_probe,
2000 .remove = rcar_dmac_remove,
2001 .shutdown = rcar_dmac_shutdown,
2004 module_platform_driver(rcar_dmac_driver);
2006 MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
2007 MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
2008 MODULE_LICENSE("GPL v2");