// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define CREATE_TRACE_POINTS
#include <trace/events/tegra_apb_dma.h>

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

/* CSRE register */
#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If any burst is in flight when the DMA is paused, this is the time (in
 * microseconds) needed for the in-flight burst to complete and for the DMA
 * status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @support_channel_pause: Support channel wise pause of dma.
 * @support_separate_wcount_reg: Support separate word count register.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int channel_reg_size;
	int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long csr;
	unsigned long ahb_ptr;
	unsigned long apb_ptr;
	unsigned long ahb_seq;
	unsigned long apb_seq;
	unsigned long wcount;
};

/*
 * tegra_dma_sg_req: DMA request details to configure hardware. This
 * contains the details for one transfer to configure the DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfers as per requester details and hw support. Each sub-transfer
 * is added to the list of transfers and points to the Tegra DMA descriptor
 * that manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs ch_regs;
	unsigned int req_len;
	bool configured;
	bool last_sg;
	struct list_head node;
	struct tegra_dma_desc *dma_desc;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
 * This descriptor keeps track of transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor txd;
	unsigned int bytes_requested;
	unsigned int bytes_transferred;
	enum dma_status dma_status;
	struct list_head node;
	struct list_head tx_list;
	struct list_head cb_node;
	int cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan dma_chan;
	char name[12];
	bool config_init;
	int id;
	int irq;
	void __iomem *chan_addr;
	spinlock_t lock;
	bool busy;
	struct tegra_dma *tdma;
	bool cyclic;

	/* Different lists for managing the requests */
	struct list_head free_sg_req;
	struct list_head pending_sg_req;
	struct list_head free_dma_desc;
	struct list_head cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler isr_handler;
	struct tasklet_struct tasklet;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device dma_dev;
	struct device *dev;
	struct clk *dma_clk;
	struct reset_control *rst;
	spinlock_t global_lock;
	void __iomem *base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32 global_pause_count;

	/* Some registers need to be cached before suspend */
	u32 reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get DMA desc from free list, if not there then allocate it. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not allocate if desc are waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;

	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
			       struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					  typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
	    sconfig->device_fc) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;

	return 0;
}

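/*
 * Illustrative only, not part of the original driver: a client is expected
 * to configure the channel through the generic dmaengine API, which ends up
 * calling tegra_dma_slave_config() above. A minimal sketch, assuming a
 * hypothetical APB FIFO at 0x70002000 used for MEM_TO_DEV:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = 0x70002000,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *		.device_fc = false,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */
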
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
				   bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
			    bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	} else {
		tegra_dma_global_resume(tdc);
	}
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
			    struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
					 struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no IEC status, this makes sure that the last burst has
	 * not completed. The last burst may still be in flight; it can then
	 * complete, but because the DMA is paused it will neither generate
	 * an interrupt nor reload the new configuration.
	 * If the IEC status is already set, the interrupt handler needs to
	 * load the new configuration.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If interrupt is pending then do nothing as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
			  nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
				  typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					   typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}

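/*
 * Illustrative note, not in the original source: per the formula above, the
 * masked STATUS count field appears to hold the pending byte count minus 4.
 * E.g. for a 64-byte sg request, a masked count of 28 means 28 + 4 = 32
 * bytes are still pending, so 64 - 28 - 4 = 32 bytes have been transferred.
 */
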
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					 typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "DMA is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head req on the list is in flight.
	 * If it is not in flight then abort the transfer as
	 * the transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);

	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
				 bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
					    bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	/* if we dma for long enough the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* Callbacks need to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not last req then put at end of pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					    typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
					    cb.callback);
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	trace_tegra_dma_isr(&tdc->dma_chan, irq);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		 "Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait one burst time so the DMA can be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	unsigned long wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return 0;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					 typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					    typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	return 0;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
	if (dma_desc && txstate) {
		residual = dma_desc->bytes_requested -
			   (dma_desc->bytes_transferred %
			    dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return ret;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
				enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			 "slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus width.
	 * Convert it into AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}

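/*
 * Worked example (not in the original source): maxburst = 8 words at a
 * 4-byte bus width gives burst_byte = 32 and burst_ahb_width = 8, which
 * selects TEGRA_APBDMA_AHBSEQ_BURST_8. With maxburst = 0 and len = 48
 * (0x30), len & 0xF == 0 and (len >> 4) & 0x1 == 1, so the length-based
 * fallback picks TEGRA_APBDMA_AHBSEQ_BURST_4 instead.
 */
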
static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}

	return -EINVAL;
}

static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
	struct tegra_dma_channel_regs *ch_regs, u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}

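/*
 * Worked example (not in the original source): the word count is encoded as
 * (len - 4) with the low two bits masked off. For len = 4096 this gives
 * len_field = 4092 (0xFFC); chips with a separate WCOUNT register take it
 * there, while older chips OR it into the CSR word-count bits
 * (TEGRA_APBDMA_CSR_WCOUNT_MASK).
 */
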
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
		   TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
		    (len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We can take in more requests as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA is
	 * started, new requests can be queued only after terminating
	 * the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
	    (len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
		   TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;

	ret = pm_runtime_get_sync(tdma->dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					    typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	pm_runtime_put(tdma->dev);

	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct tegra_dma_channel *tdc;

	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

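/*
 * Illustrative only, not part of the original driver: with this xlate
 * callback a device-tree consumer selects its APB request line through a
 * single specifier cell, e.g. for a hypothetical client node:
 *
 *	dmas = <&apbdma 8>, <&apbdma 8>;
 *	dma-names = "rx", "tx";
 *
 * dma_spec->args[0] above is that request-select cell, stored as the
 * channel's slave_id.
 */
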
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x40,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = true,
};

static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "Error: No device match data found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev,
			    struct_size(tdma, channels, cdata->nr_channels),
			    GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		ret = tegra_dma_runtime_resume(&pdev->dev);
	else
		ret = pm_runtime_get_sync(&pdev->dev);

	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Reset DMA controller */
	reset_control_assert(tdma->rst);
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	pm_runtime_put(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_addr = tdma->base_addr +
				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
				 (i * cdata->channel_reg_size);

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
			      &tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
			     (unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->global_pause_count = 0;
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
					BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
					BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	/*
	 * XXX The hardware appears to support
	 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
	 * only used by this driver during tegra_dma_terminate_all()
	 */
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
		 cdata->nr_channels);

	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);
err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;

	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only save the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
		if (tdma->chip_data->support_separate_wcount_reg)
			ch_reg->wcount = tdc_read(tdc,
						  TEGRA_APBDMA_CHAN_WCOUNT);
	}

	clk_disable_unprepare(tdma->dma_clk);

	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i, ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only restore the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		if (tdma->chip_data->support_separate_wcount_reg)
			tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
				  ch_reg->wcount);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			  (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
	}

	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");