// SPDX-License-Identifier: GPL-2.0+
//
// Actions Semi Owl SoCs DMA driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <liuwei@actions-semi.com>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/slab.h>

#include "virt-dma.h"
#define OWL_DMA_FRAME_MAX_LENGTH		0xfffff

/* Global DMA Controller Registers */
#define OWL_DMA_IRQ_PD0				0x00
#define OWL_DMA_IRQ_PD1				0x04
#define OWL_DMA_IRQ_PD2				0x08
#define OWL_DMA_IRQ_PD3				0x0C
#define OWL_DMA_IRQ_EN0				0x10
#define OWL_DMA_IRQ_EN1				0x14
#define OWL_DMA_IRQ_EN2				0x18
#define OWL_DMA_IRQ_EN3				0x1C
#define OWL_DMA_SECURE_ACCESS_CTL		0x20
#define OWL_DMA_NIC_QOS				0x24
#define OWL_DMA_DBGSEL				0x28
#define OWL_DMA_IDLE_STAT			0x2C
/* Channel Registers */
#define OWL_DMA_CHAN_BASE(i)			(0x100 + (i) * 0x100)
#define OWL_DMAX_MODE				0x00
#define OWL_DMAX_SOURCE				0x04
#define OWL_DMAX_DESTINATION			0x08
#define OWL_DMAX_FRAME_LEN			0x0C
#define OWL_DMAX_FRAME_CNT			0x10
#define OWL_DMAX_REMAIN_FRAME_CNT		0x14
#define OWL_DMAX_REMAIN_CNT			0x18
#define OWL_DMAX_SOURCE_STRIDE			0x1C
#define OWL_DMAX_DESTINATION_STRIDE		0x20
#define OWL_DMAX_START				0x24
#define OWL_DMAX_PAUSE				0x28
#define OWL_DMAX_CHAINED_CTL			0x2C
#define OWL_DMAX_CONSTANT			0x30
#define OWL_DMAX_LINKLIST_CTL			0x34
#define OWL_DMAX_NEXT_DESCRIPTOR		0x38
#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM		0x3C
#define OWL_DMAX_INT_CTL			0x40
#define OWL_DMAX_INT_STATUS			0x44
#define OWL_DMAX_CURRENT_SOURCE_POINTER		0x48
#define OWL_DMAX_CURRENT_DESTINATION_POINTER	0x4C
/* OWL_DMAX_MODE Bits */
#define OWL_DMA_MODE_TS(x)			(((x) & GENMASK(5, 0)) << 0)
#define OWL_DMA_MODE_ST(x)			(((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_MODE_ST_DEV			OWL_DMA_MODE_ST(0)
#define OWL_DMA_MODE_ST_DCU			OWL_DMA_MODE_ST(2)
#define OWL_DMA_MODE_ST_SRAM			OWL_DMA_MODE_ST(3)
#define OWL_DMA_MODE_DT(x)			(((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_MODE_DT_DEV			OWL_DMA_MODE_DT(0)
#define OWL_DMA_MODE_DT_DCU			OWL_DMA_MODE_DT(2)
#define OWL_DMA_MODE_DT_SRAM			OWL_DMA_MODE_DT(3)
#define OWL_DMA_MODE_SAM(x)			(((x) & GENMASK(1, 0)) << 16)
#define OWL_DMA_MODE_SAM_CONST			OWL_DMA_MODE_SAM(0)
#define OWL_DMA_MODE_SAM_INC			OWL_DMA_MODE_SAM(1)
#define OWL_DMA_MODE_SAM_STRIDE			OWL_DMA_MODE_SAM(2)
#define OWL_DMA_MODE_DAM(x)			(((x) & GENMASK(1, 0)) << 18)
#define OWL_DMA_MODE_DAM_CONST			OWL_DMA_MODE_DAM(0)
#define OWL_DMA_MODE_DAM_INC			OWL_DMA_MODE_DAM(1)
#define OWL_DMA_MODE_DAM_STRIDE			OWL_DMA_MODE_DAM(2)
#define OWL_DMA_MODE_PW(x)			(((x) & GENMASK(2, 0)) << 20)
#define OWL_DMA_MODE_CB				BIT(23)
#define OWL_DMA_MODE_NDDBW(x)			(((x) & 0x1) << 28)
#define OWL_DMA_MODE_NDDBW_32BIT		OWL_DMA_MODE_NDDBW(0)
#define OWL_DMA_MODE_NDDBW_8BIT			OWL_DMA_MODE_NDDBW(1)
#define OWL_DMA_MODE_CFE			BIT(29)
#define OWL_DMA_MODE_LME			BIT(30)
#define OWL_DMA_MODE_CME			BIT(31)
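
/*
 * Example mode word (the drq value 2 is illustrative only): a
 * DMA_MEM_TO_DEV transfer on request line 2 builds
 * mode = OWL_DMA_MODE_PW(0) | OWL_DMA_MODE_TS(2) | OWL_DMA_MODE_ST_DCU |
 *        OWL_DMA_MODE_DT_DEV | OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST
 *      = 0x00010202
 * in owl_dma_cfg_lli() below: TS(2) contributes 0x2, ST_DCU 0x200 and
 * SAM_INC 0x10000, while DT_DEV, DAM_CONST and PW(0) are all zero.
 */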
/* OWL_DMAX_LINKLIST_CTL Bits */
#define OWL_DMA_LLC_SAV(x)			(((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_LLC_SAV_INC			OWL_DMA_LLC_SAV(0)
#define OWL_DMA_LLC_SAV_LOAD_NEXT		OWL_DMA_LLC_SAV(1)
#define OWL_DMA_LLC_SAV_LOAD_PREV		OWL_DMA_LLC_SAV(2)
#define OWL_DMA_LLC_DAV(x)			(((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_LLC_DAV_INC			OWL_DMA_LLC_DAV(0)
#define OWL_DMA_LLC_DAV_LOAD_NEXT		OWL_DMA_LLC_DAV(1)
#define OWL_DMA_LLC_DAV_LOAD_PREV		OWL_DMA_LLC_DAV(2)
#define OWL_DMA_LLC_SUSPEND			BIT(16)
/* OWL_DMAX_INT_CTL Bits */
#define OWL_DMA_INTCTL_BLOCK			BIT(0)
#define OWL_DMA_INTCTL_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTCTL_FRAME			BIT(2)
#define OWL_DMA_INTCTL_HALF_FRAME		BIT(3)
#define OWL_DMA_INTCTL_LAST_FRAME		BIT(4)

/* OWL_DMAX_INT_STATUS Bits */
#define OWL_DMA_INTSTAT_BLOCK			BIT(0)
#define OWL_DMA_INTSTAT_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTSTAT_FRAME			BIT(2)
#define OWL_DMA_INTSTAT_HALF_FRAME		BIT(3)
#define OWL_DMA_INTSTAT_LAST_FRAME		BIT(4)
/* Extract a 'width'-bit field of 'val' at 'shift' and place it at 'newshift' */
#define BIT_FIELD(val, width, shift, newshift)	\
		((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
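
/*
 * Worked example: BIT_FIELD(0x00230000, 8, 16, 20) takes bits [23:16]
 * of the value (0x23), masks them with BIT(8) - 1 = 0xff, and shifts the
 * result to bit 20, yielding 0x02300000. llc_hw_ctrla() below relies on
 * exactly this repacking to translate OWL_DMAX_MODE-style bits into the
 * layout expected by the hardware link-list descriptor words.
 */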
/* Frame count value is fixed as 1 */
#define FCNT_VAL				0x1
/**
 * enum owl_dmadesc_offsets - Describe DMA descriptor, hardware link
 * list for dma transfer
 * @OWL_DMADESC_NEXT_LLI: physical address of the next link list
 * @OWL_DMADESC_SADDR: source physical address
 * @OWL_DMADESC_DADDR: destination physical address
 * @OWL_DMADESC_FLEN: frame length
 * @OWL_DMADESC_SRC_STRIDE: source stride
 * @OWL_DMADESC_DST_STRIDE: destination stride
 * @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config
 * @OWL_DMADESC_CTRLB: interrupt config
 * @OWL_DMADESC_CONST_NUM: data for constant fill
 */
enum owl_dmadesc_offsets {
	OWL_DMADESC_NEXT_LLI = 0,
	OWL_DMADESC_SADDR,
	OWL_DMADESC_DADDR,
	OWL_DMADESC_FLEN,
	OWL_DMADESC_SRC_STRIDE,
	OWL_DMADESC_DST_STRIDE,
	OWL_DMADESC_CTRLA,
	OWL_DMADESC_CTRLB,
	OWL_DMADESC_CONST_NUM,
	OWL_DMADESC_SIZE
};

enum owl_dma_id {
	S900_DMA,
	S700_DMA,
};
/**
 * struct owl_dma_lli - Link list for dma transfer
 * @hw: hardware link list
 * @phys: physical address of hardware link list
 * @node: node for txd's lli_list
 */
struct owl_dma_lli {
	u32			hw[OWL_DMADESC_SIZE];
	dma_addr_t		phys;

	struct list_head	node;
};
/**
 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @lli_list: link list of lli nodes
 * @cyclic: flag to indicate cyclic transfers
 */
struct owl_dma_txd {
	struct virt_dma_desc	vd;
	struct list_head	lli_list;
	bool			cyclic;
};
/**
 * struct owl_dma_pchan - Holder for the physical channels
 * @id: physical index to this channel
 * @base: virtual memory base for the dma channel
 * @vchan: the virtual channel currently being served by this physical channel
 */
struct owl_dma_pchan {
	u32			id;
	void __iomem		*base;
	struct owl_dma_vchan	*vchan;
};
/**
 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 * @cfg: slave configuration for this channel
 * @drq: physical DMA request ID for this channel
 */
struct owl_dma_vchan {
	struct virt_dma_chan	vc;
	struct owl_dma_pchan	*pchan;
	struct owl_dma_txd	*txd;
	struct dma_slave_config cfg;
	u8			drq;
};
/**
 * struct owl_dma - Holder for the Owl DMA controller
 * @dma: dma engine for this instance
 * @base: virtual memory base for the DMA controller
 * @clk: clock for the DMA controller
 * @lock: a lock to use when changing DMA controller global registers
 * @lli_pool: a pool for the LLI descriptors
 * @irq: interrupt ID for the DMA controller
 * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
 * @vchans: array of data for the virtual channels
 * @devid: device id based on OWL SoC
 */
struct owl_dma {
	struct dma_device	dma;
	void __iomem		*base;
	struct clk		*clk;
	spinlock_t		lock;
	struct dma_pool		*lli_pool;
	int			irq;

	unsigned int		nr_pchans;
	struct owl_dma_pchan	*pchans;

	unsigned int		nr_vchans;
	struct owl_dma_vchan	*vchans;
	enum owl_dma_id		devid;
};
static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
			 u32 val, bool state)
{
	u32 regval;

	regval = readl(pchan->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	/* Write back the modified value to complete the read-modify-write */
	writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
{
	writel(data, pchan->base + reg);
}

static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
{
	return readl(pchan->base + reg);
}
static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
	u32 regval;

	regval = readl(od->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	/* Write back the modified value to complete the read-modify-write */
	writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
{
	writel(data, od->base + reg);
}

static u32 dma_readl(struct owl_dma *od, u32 reg)
{
	return readl(od->base + reg);
}
static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
{
	return container_of(dd, struct owl_dma, dma);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct owl_dma_vchan, vc.chan);
}

static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct owl_dma_txd, vd.tx);
}
static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(mode, 4, 28, 28) |
	      BIT_FIELD(mode, 8, 16, 20) |
	      BIT_FIELD(mode, 4, 8, 16) |
	      BIT_FIELD(mode, 6, 0, 10) |
	      BIT_FIELD(llc_ctl, 2, 10, 8) |
	      BIT_FIELD(llc_ctl, 2, 8, 6);

	return ctl;
}
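
/*
 * Example: owl_dma_add_lli() calls llc_hw_ctrla(OWL_DMA_MODE_LME, 0) to
 * flag a descriptor as chained. OWL_DMA_MODE_LME is BIT(30), and the
 * BIT_FIELD(mode, 4, 28, 28) term keeps bits [31:28] in place, so the
 * call evaluates to 0x40000000 while every other term contributes 0.
 */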
static inline u32 llc_hw_ctrlb(u32 int_ctl)
{
	u32 ctl;

	/*
	 * Irrespective of the SoC, ctrlb value starts filling from
	 * bit 18.
	 */
	ctl = BIT_FIELD(int_ctl, 7, 0, 18);

	return ctl;
}
static u32 llc_hw_flen(struct owl_dma_lli *lli)
{
	return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0);
}
static void owl_dma_free_lli(struct owl_dma *od,
			     struct owl_dma_lli *lli)
{
	list_del(&lli->node);
	dma_pool_free(od->lli_pool, lli, lli->phys);
}
static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
	struct owl_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
	if (!lli)
		return NULL;

	INIT_LIST_HEAD(&lli->node);
	lli->phys = phys;

	return lli;
}
static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
					   struct owl_dma_lli *prev,
					   struct owl_dma_lli *next,
					   bool is_cyclic)
{
	if (!is_cyclic)
		list_add_tail(&next->node, &txd->lli_list);

	if (prev) {
		prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys;
		prev->hw[OWL_DMADESC_CTRLA] |=
					llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
	}

	return next;
}
static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
				  struct owl_dma_lli *lli,
				  dma_addr_t src, dma_addr_t dst,
				  u32 len, enum dma_transfer_direction dir,
				  struct dma_slave_config *sconfig,
				  bool is_cyclic)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	u32 mode, ctrlb;

	mode = OWL_DMA_MODE_PW(0);

	switch (dir) {
	case DMA_MEM_TO_MEM:
		mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
			OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
			OWL_DMA_MODE_DAM_INC;

		break;
	case DMA_MEM_TO_DEV:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	case DMA_DEV_TO_MEM:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	default:
		return -EINVAL;
	}
	lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode,
						  OWL_DMA_LLC_SAV_LOAD_NEXT |
						  OWL_DMA_LLC_DAV_LOAD_NEXT);

	if (is_cyclic)
		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
	else
		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);

	lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */
	lli->hw[OWL_DMADESC_SADDR] = src;
	lli->hw[OWL_DMADESC_DADDR] = dst;
	lli->hw[OWL_DMADESC_SRC_STRIDE] = 0;
	lli->hw[OWL_DMADESC_DST_STRIDE] = 0;

	if (od->devid == S700_DMA) {
		/* Max frame length is 1MB */
		lli->hw[OWL_DMADESC_FLEN] = len;
		/*
		 * On S700, the word at offset 0x1C is shared between
		 * frame count and ctrlb: the first 12 bits are the frame
		 * count and the remaining 20 bits are ctrlb.
		 */
		lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
	} else {
		/*
		 * On S900, the word at offset 0xC is shared between
		 * frame length (max frame length is 1MB) and frame count:
		 * the first 20 bits are the frame length and the
		 * remaining 12 bits are the frame count.
		 */
		lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
		lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
	}

	return 0;
}
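
/*
 * Worked example for the S900 layout above: with FCNT_VAL fixed at 1 and
 * a frame length of 0x100 bytes, OWL_DMADESC_FLEN holds
 * 0x100 | 1 << 20 = 0x00100100, and llc_hw_flen() recovers the 0x100
 * length by masking with GENMASK(19, 0).
 */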
static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
					       struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];

		spin_lock_irqsave(&od->lock, flags);
		if (!pchan->vchan) {
			pchan->vchan = vchan;
			spin_unlock_irqrestore(&od->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&od->lock, flags);
	}

	return pchan;
}
static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
	unsigned int val;

	val = dma_readl(od, OWL_DMA_IDLE_STAT);

	return !(val & (1 << pchan->id));
}
static void owl_dma_terminate_pchan(struct owl_dma *od,
				    struct owl_dma_pchan *pchan)
{
	unsigned long flags;
	u32 irq_pd;

	pchan_writel(pchan, OWL_DMAX_START, 0);
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);
	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

	irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
	if (irq_pd & (1 << pchan->id)) {
		dev_warn(od->dma.dev,
			 "terminating pchan %d that still has pending irq\n",
			 pchan->id);
		dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
	}

	pchan->vchan = NULL;

	spin_unlock_irqrestore(&od->lock, flags);
}
static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 1);
}

static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 0);
}
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
	struct owl_dma_pchan *pchan = vchan->pchan;
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
	struct owl_dma_lli *lli;
	unsigned long flags;
	u32 int_ctl;

	list_del(&vd->node);

	vchan->txd = txd;

	/* Wait for channel inactive */
	while (owl_dma_pchan_busy(od, pchan))
		cpu_relax();

	lli = list_first_entry(&txd->lli_list,
			       struct owl_dma_lli, node);

	if (txd->cyclic)
		int_ctl = OWL_DMA_INTCTL_BLOCK;
	else
		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;

	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
		     OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
	pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
	pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);

	/* Clear IRQ status for this pchan */
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);

	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);

	spin_unlock_irqrestore(&od->lock, flags);

	dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);

	/* Start DMA transfer for this pchan */
	pchan_writel(pchan, OWL_DMAX_START, 0x1);

	return 0;
}
static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
	/* Ensure that the physical channel is stopped */
	owl_dma_terminate_pchan(od, vchan->pchan);

	vchan->pchan = NULL;
}
static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
	struct owl_dma *od = dev_id;
	struct owl_dma_vchan *vchan;
	struct owl_dma_pchan *pchan;
	unsigned long pending;
	int i;
	unsigned int global_irq_pending, chan_irq_pending;

	spin_lock(&od->lock);

	pending = dma_readl(od, OWL_DMA_IRQ_PD0);

	/* Clear IRQ status for each pchan */
	for_each_set_bit(i, &pending, od->nr_pchans) {
		pchan = &od->pchans[i];
		pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
	}

	/* Clear pending IRQ */
	dma_writel(od, OWL_DMA_IRQ_PD0, pending);

	/* Check missed pending IRQ */
	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];
		chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
				   pchan_readl(pchan, OWL_DMAX_INT_STATUS);

		/* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
		dma_readl(od, OWL_DMA_IRQ_PD0);

		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);

		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
			dev_dbg(od->dma.dev,
				"global and channel IRQ pending match err\n");

			/* Clear IRQ status for this pchan */
			pchan_update(pchan, OWL_DMAX_INT_STATUS,
				     0xff, false);

			/* Update global IRQ pending */
			pending |= BIT(i);
		}
	}

	spin_unlock(&od->lock);
	for_each_set_bit(i, &pending, od->nr_pchans) {
		struct owl_dma_txd *txd;

		pchan = &od->pchans[i];

		vchan = pchan->vchan;
		if (!vchan) {
			dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
				 i);
			continue;
		}

		spin_lock(&vchan->vc.lock);

		txd = vchan->txd;
		if (txd) {
			vchan->txd = NULL;

			vchan_cookie_complete(&txd->vd);

			/*
			 * Start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&vchan->vc))
				owl_dma_start_next_txd(vchan);
			else
				owl_dma_phy_free(od, vchan);
		}

		spin_unlock(&vchan->vc.lock);
	}

	return IRQ_HANDLED;
}
static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
	struct owl_dma_lli *lli, *_lli;

	if (unlikely(!txd))
		return;

	list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
		owl_dma_free_lli(od, lli);

	kfree(txd);
}
static void owl_dma_desc_free(struct virt_dma_desc *vd)
{
	struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);

	owl_dma_free_txd(od, txd);
}
static int owl_dma_terminate_all(struct dma_chan *chan)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->pchan)
		owl_dma_phy_free(od, vchan);

	if (vchan->txd) {
		owl_dma_desc_free(&vchan->txd->vd);
		vchan->txd = NULL;
	}

	vchan_get_all_descriptors(&vchan->vc, &head);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}
static int owl_dma_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));

	return 0;
}
static int owl_dma_pause(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_pause_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}
static int owl_dma_resume(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	if (!vchan->pchan && !vchan->txd)
		return 0;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_resume_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}
static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli;
	unsigned int next_lli_phy;
	size_t bytes;

	pchan = vchan->pchan;
	txd = vchan->txd;

	if (!pchan || !txd)
		return 0;

	/* Get remain count of current node in link list */
	bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);

	/* Loop through the preceding nodes to get total remaining bytes */
	if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
		next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
		list_for_each_entry(lli, &txd->lli_list, node) {
			/* Start from the next active node */
			if (lli->phys == next_lli_phy) {
				list_for_each_entry(lli, &txd->lli_list,
						    node)
					bytes += llc_hw_flen(lli);
				break;
			}
		}
	}

	return bytes;
}
static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *state)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct owl_dma_txd *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	if (vd) {
		txd = to_owl_txd(&vd->tx);
		list_for_each_entry(lli, &txd->lli_list, node)
			bytes += llc_hw_flen(lli);
	} else {
		bytes = owl_dma_getbytes_chan(vchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}
static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct owl_dma_pchan *pchan;

	pchan = owl_dma_get_pchan(od, vchan);
	if (!pchan)
		return;

	dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);

	vchan->pchan = pchan;
	owl_dma_start_next_txd(vchan);
}
static void owl_dma_issue_pending(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	if (vchan_issue_pending(&vchan->vc)) {
		if (!vchan->pchan)
			owl_dma_phy_alloc_and_start(vchan);
	}
	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}
static struct dma_async_tx_descriptor
		*owl_dma_prep_memcpy(struct dma_chan *chan,
				     dma_addr_t dst, dma_addr_t src,
				     size_t len, unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	size_t offset, bytes;
	int ret;

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	/* Process the transfer frame by frame */
	for (offset = 0; offset < len; offset += bytes) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);

		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
				      bytes, DMA_MEM_TO_MEM,
				      &vchan->cfg, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);
	return NULL;
}
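
/*
 * Sizing illustration for the loop above: a 3 MiB (0x300000 byte) memcpy
 * is split into ceil(0x300000 / 0xfffff) = 4 link-list nodes, three
 * carrying OWL_DMA_FRAME_MAX_LENGTH (0xfffff) bytes each and a final
 * node carrying the remaining 3 bytes.
 */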
static struct dma_async_tx_descriptor
		*owl_dma_prep_slave_sg(struct dma_chan *chan,
				       struct scatterlist *sgl,
				       unsigned int sg_len,
				       enum dma_transfer_direction dir,
				       unsigned long flags, void *context)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	size_t len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if (len > OWL_DMA_FRAME_MAX_LENGTH) {
			dev_err(chan2dev(chan),
				"frame length exceeds max supported length\n");
			goto err_txd_free;
		}

		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_err(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = sconfig->dst_addr;
		} else {
			src = sconfig->src_addr;
			dst = addr;
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
				      txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}
static struct dma_async_tx_descriptor
		*owl_prep_dma_cyclic(struct dma_chan *chan,
				     dma_addr_t buf_addr, size_t buf_len,
				     size_t period_len,
				     enum dma_transfer_direction dir,
				     unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
	dma_addr_t src = 0, dst = 0;
	unsigned int periods = buf_len / period_len;
	int i, ret;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);
	txd->cyclic = true;

	for (i = 0; i < periods; i++) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = buf_addr + (period_len * i);
			dst = sconfig->dst_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = sconfig->src_addr;
			dst = buf_addr + (period_len * i);
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
				      dir, sconfig, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		if (!first)
			first = lli;

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	/* close the cyclic list */
	owl_dma_add_lli(txd, prev, first, true);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}
static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(&vchan->vc);
}
static inline void owl_dma_free(struct owl_dma *od)
{
	struct owl_dma_vchan *vchan = NULL;
	struct owl_dma_vchan *next;

	list_for_each_entry_safe(vchan,
				 next, &od->dma.channels, vc.chan.device_node) {
		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}
static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct owl_dma *od = ofdma->of_dma_data;
	struct owl_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 drq = dma_spec->args[0];

	if (drq > od->nr_vchans)
		return NULL;

	chan = dma_get_any_slave_channel(&od->dma);
	if (!chan)
		return NULL;

	vchan = to_owl_vchan(chan);
	vchan->drq = drq;

	return chan;
}
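
/*
 * Device-tree usage sketch (the node names and request line number 22
 * below are illustrative only, not taken from a real board file):
 *
 *	uart5: serial@e012a000 {
 *		...
 *		dmas = <&dma 22>;
 *		dma-names = "tx";
 *	};
 *
 * The single cell after the controller phandle arrives here as
 * dma_spec->args[0] and becomes the vchan's drq.
 */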
static const struct of_device_id owl_dma_match[] = {
	{ .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
	{ .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, owl_dma_match);
static int owl_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct owl_dma *od;
	int ret, i, nr_channels, nr_requests;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	ret = of_property_read_u32(np, "dma-channels", &nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-channels\n");
		return ret;
	}

	ret = of_property_read_u32(np, "dma-requests", &nr_requests);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-requests\n");
		return ret;
	}
	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
		 nr_channels, nr_requests);

	od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);

	od->nr_pchans = nr_channels;
	od->nr_vchans = nr_requests;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	platform_set_drvdata(pdev, od);
	spin_lock_init(&od->lock);

	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
	od->dma.dev = &pdev->dev;
	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
	od->dma.device_tx_status = owl_dma_tx_status;
	od->dma.device_issue_pending = owl_dma_issue_pending;
	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
	od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
	od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
	od->dma.device_config = owl_dma_config;
	od->dma.device_pause = owl_dma_pause;
	od->dma.device_resume = owl_dma_resume;
	od->dma.device_terminate_all = owl_dma_terminate_all;
	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.directions = BIT(DMA_MEM_TO_MEM);
	od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	INIT_LIST_HEAD(&od->dma.channels);
	od->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(od->clk)) {
		dev_err(&pdev->dev, "unable to get clock\n");
		return PTR_ERR(od->clk);
	}

	/*
	 * Even though the DMA controller is capable of generating 4
	 * IRQs for its DMA priority feature, we only use 1 IRQ for
	 * simplification.
	 */
	od->irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
			       dev_name(&pdev->dev), od);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ\n");
		return ret;
	}
	/* Init physical channel */
	od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
				  sizeof(struct owl_dma_pchan), GFP_KERNEL);
	if (!od->pchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_pchans; i++) {
		struct owl_dma_pchan *pchan = &od->pchans[i];

		pchan->id = i;
		pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
	}

	/* Init virtual channel */
	od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
				  sizeof(struct owl_dma_vchan), GFP_KERNEL);
	if (!od->vchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_vchans; i++) {
		struct owl_dma_vchan *vchan = &od->vchans[i];

		vchan->vc.desc_free = owl_dma_desc_free;
		vchan_init(&vchan->vc, &od->dma);
	}
	/* Create a pool of consistent memory blocks for hardware descriptors */
	od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
				       sizeof(struct owl_dma_lli),
				       __alignof__(struct owl_dma_lli),
				       0);
	if (!od->lli_pool) {
		dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
		return -ENOMEM;
	}
	clk_prepare_enable(od->clk);

	ret = dma_async_device_register(&od->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_pool_free;
	}

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 owl_dma_of_xlate, od);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&od->dma);
err_pool_free:
	clk_disable_unprepare(od->clk);
	dma_pool_destroy(od->lli_pool);

	return ret;
}
static int owl_dma_remove(struct platform_device *pdev)
{
	struct owl_dma *od = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&od->dma);

	/* Mask all interrupts for this execution environment */
	dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(od->dma.dev, od->irq, od);

	owl_dma_free(od);

	clk_disable_unprepare(od->clk);

	return 0;
}
static struct platform_driver owl_dma_driver = {
	.probe	= owl_dma_probe,
	.remove	= owl_dma_remove,
	.driver = {
		.name = "dma-owl",
		.of_match_table = of_match_ptr(owl_dma_match),
	},
};
static int owl_dma_init(void)
{
	return platform_driver_register(&owl_dma_driver);
}
subsys_initcall(owl_dma_init);

static void __exit owl_dma_exit(void)
{
	platform_driver_unregister(&owl_dma_driver);
}
module_exit(owl_dma_exit);
MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
MODULE_LICENSE("GPL");