// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 HiSilicon Limited.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc
#define DMA_CYCLIC_MAX_PERIOD	0x1000
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_TC2			0x08
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_TC2_MASK		0x1c
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_TC2_RAW		0x608
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT1			0x80c
#define CX_CNT0			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_NODEIRQ		BIT(1)
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

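/*
 * How this driver packs CX_CFG, inferred from k3_dma_config_write() and
 * k3_dma_prep_memcpy() below (exact field widths are an assumption, not
 * taken from a datasheet):
 *   bit  0           channel enable (CX_CFG_EN)
 *   bit  1           per-LLI-node irq (CX_CFG_NODEIRQ, marks cyclic periods)
 *   bits 2..3        direction (CX_CFG_MEM2PER / CX_CFG_PER2MEM)
 *   bits 4..         peripheral request line (vchan id)
 *   bits 12.., 16..  src/dst bus width as log2(bytes)
 *   bits 20.., 24..  src/dst burst length minus one
 *   bit 30           destination address increment (CX_CFG_DSTINCR)
 *   bit 31           source address increment (CX_CFG_SRCINCR)
 */
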
struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct k3_desc_hw	*desc_hw;
};

struct k3_dma_phy;

struct k3_dma_chan {
	u32			ccfg;
	struct virt_dma_chan	vc;
	struct k3_dma_phy	*phy;
	struct list_head	node;
	dma_addr_t		dev_addr;
	enum dma_status		status;
	bool			cyclic;
	struct dma_slave_config	slave_config;
};

struct k3_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct k3_dma_chan	*vchan;
	struct k3_dma_desc_sw	*ds_run;
	struct k3_dma_desc_sw	*ds_done;
};

struct k3_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct tasklet_struct	task;
	spinlock_t		lock;
	struct list_head	chan_pending;
	struct k3_dma_phy	*phy;
	struct k3_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	u32			dma_channel_mask;
	unsigned int		irq;
};

#define K3_FLAG_NOCLK	BIT(1)

struct k3dma_soc_data {
	unsigned long flags;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg);

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}

static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_TC2_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT0);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}

static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1  = readl_relaxed(d->base + INT_TC1);
	u32 tc2  = readl_relaxed(d->base + INT_TC2);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= ~BIT(i);
		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {

			p = &d->phy[i];
			c = p->vchan;
			if (c && (tc1 & BIT(i))) {
				spin_lock(&c->vc.lock);
				if (p->ds_run != NULL) {
					vchan_cookie_complete(&p->ds_run->vd);
					p->ds_done = p->ds_run;
					p->ds_run = NULL;
				}
				spin_unlock(&c->vc.lock);
			}
			if (c && (tc2 & BIT(i))) {
				spin_lock(&c->vc.lock);
				if (p->ds_run != NULL)
					vchan_cyclic_callback(&p->ds_run->vd);
				spin_unlock(&c->vc.lock);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
		tasklet_schedule(&d->task);

	if (irq_chan || err1 || err2)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	/* Avoid losing track of ds_run if a transaction is in flight */
	if (c->phy->ds_run)
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);

		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	c->phy->ds_run = NULL;
	c->phy->ds_done = NULL;
	return -EAGAIN;
}

static void k3_dma_tasklet(struct tasklet_struct *t)
{
	struct k3_dma_dev *d = from_tasklet(d, t, task);
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd && !c->cyclic) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = ((clli - ds->desc_hw_lli) /
				sizeof(struct k3_desc_hw)) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
			dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if (num != ds->desc_num - 1)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);

	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}

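/*
 * Example: a two-entry chain built by two k3_dma_fill_desc() calls sits
 * contiguously in the pool block at ds->desc_hw_lli:
 *
 *   desc_hw[0].lli = (desc_hw_lli + sizeof(struct k3_desc_hw)) | CX_LLI_CHAIN_EN
 *   desc_hw[1].lli = CX_LLI_CHAIN_EN only; the prep callbacks then either
 *                    zero it (end of chain) or OR in desc_hw_lli (cyclic wrap)
 *
 * so the controller fetches desc_hw[1] on its own once desc_hw[0] completes.
 */
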
static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
					struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	ds->size = len;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

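/*
 * A minimal sketch of how a dmaengine client could exercise the memcpy path
 * above. This helper is illustrative only and not part of the driver; the
 * addresses and length are placeholders, while the dmaengine_* calls are the
 * standard client API from <linux/dmaengine.h>.
 */
static int __maybe_unused k3_dma_memcpy_example(dma_addr_t dst, dma_addr_t src,
						size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);	/* any DMA_MEMCPY channel */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);		/* queues on the vchan */
	dma_async_issue_pending(chan);		/* -> k3_dma_issue_pending() */

	/* busy-wait just for the sketch; real clients use a callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}
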
static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	c->cyclic = 0;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		       size_t buf_len, size_t period_len,
		       enum dma_transfer_direction dir,
		       unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	dma_addr_t addr, src = 0, dst = 0;
	int num = 1, since = 0;
	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
	       __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
	       buf_len, period_len, (int)dir);

	avail = buf_len;
	if (avail > modulo)
		num += DIV_ROUND_UP(avail, modulo) - 1;

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 1;
	addr = buf_addr;
	avail = buf_len;
	total = avail;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = min_t(size_t, avail, modulo);

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = addr;
		}
		since += len;
		if (since >= period_len) {
			/* descriptor asks for TC2 interrupt on completion */
			en_tc2 = CX_CFG_NODEIRQ;
			since -= period_len;
		} else
			en_tc2 = 0;

		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

		addr += len;
		avail -= len;
	} while (avail);

	/* "Cyclic" == end of link points back to start of link */
	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

	ds->size = total;

	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

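/*
 * Sketch of the client side of the cyclic path, e.g. an audio driver feeding
 * a ring buffer to a peripheral FIFO. The channel, buffer and period are
 * placeholders; TC2 fires once per period and the interrupt handler above
 * turns it into vchan_cyclic_callback().
 */
static struct dma_async_tx_descriptor * __maybe_unused
k3_dma_cyclic_example(struct dma_chan *chan, dma_addr_t buf,
		      size_t buf_len, size_t period_len)
{
	return dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
}
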
static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	memcpy(&c->slave_config, cfg, sizeof(*cfg));

	return 0;
}

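/*
 * Sketch: a peripheral driver would fill a dma_slave_config and hand it to
 * the core before preparing transfers; it lands in c->slave_config above and
 * is only applied by k3_dma_config_write() at prep time. The FIFO address,
 * width and burst below are placeholders.
 */
static int __maybe_unused k3_dma_slave_config_example(struct dma_chan *chan,
						      dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	return dmaengine_slave_config(chan, &cfg);	/* -> k3_dma_config() */
}
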
static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
	c->ccfg |= (val << 12) | (val << 16);

	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);
	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);
	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

static int k3_dma_terminate_all(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		k3_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		if (p->ds_run) {
			vchan_terminate_vdesc(&p->ds_run->vd);
			p->ds_run = NULL;
		}
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void k3_dma_synchronize(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	vchan_synchronize(&c->vc);
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;
		if (p) {
			k3_dma_pause_dma(p, false);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}

	return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;
		if (p) {
			k3_dma_pause_dma(p, true);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

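/*
 * Clients reach the two callbacks above through the generic dmaengine
 * wrappers; a trivial sketch, not part of the driver:
 */
static void __maybe_unused k3_dma_pause_resume_example(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* -> k3_dma_transfer_pause() */
	dmaengine_resume(chan);		/* -> k3_dma_transfer_resume() */
}
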
static const struct k3dma_soc_data k3_v1_dma_data = {
	.flags = 0,
};

static const struct k3dma_soc_data asp_v1_dma_data = {
	.flags = K3_FLAG_NOCLK,
};

static const struct of_device_id k3_pdma_dt_ids[] = {
	{ .compatible = "hisilicon,k3-dma-1.0",
	  .data = &k3_v1_dma_data
	},
	{ .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
	  .data = &asp_v1_dma_data
	},
	{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

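/*
 * A device-tree node sketch for this controller. The unit address, counts,
 * interrupt and clock below are board-specific placeholders, not values
 * mandated by the binding:
 *
 *	dma0: dma@fcd02000 {
 *		compatible = "hisilicon,k3-dma-1.0";
 *		reg = <0xfcd02000 0x1000>;
 *		#dma-cells = <1>;
 *		dma-channels = <16>;
 *		dma-requests = <27>;
 *		interrupts = <0 12 4>;
 *		clocks = <&pclk>;
 *	};
 *
 * A client then names its request line by number, e.g. dmas = <&dma0 18>,
 * which k3_of_dma_simple_xlate() below maps to d->chans[18].
 */
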
static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct k3_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= d->dma_requests)
		return NULL;

	return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

static int k3_dma_probe(struct platform_device *op)
{
	const struct k3dma_soc_data *soc_data;
	struct k3_dma_dev *d;
	int i, ret, irq = 0;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	soc_data = device_get_match_data(&op->dev);
	if (!soc_data)
		return -EINVAL;

	d->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_property_read_u32((&op->dev)->of_node,
			     "dma-channels", &d->dma_channels);
	of_property_read_u32((&op->dev)->of_node,
			     "dma-requests", &d->dma_requests);
	ret = of_property_read_u32((&op->dev)->of_node,
				   "dma-channel-mask", &d->dma_channel_mask);
	if (ret) {
		dev_warn(&op->dev,
			 "dma-channel-mask doesn't exist, considering all as available.\n");
		d->dma_channel_mask = (u32)~0UL;
	}

	if (!(soc_data->flags & K3_FLAG_NOCLK)) {
		d->clk = devm_clk_get(&op->dev, NULL);
		if (IS_ERR(d->clk)) {
			dev_err(&op->dev, "no dma clk\n");
			return PTR_ERR(d->clk);
		}
	}

	irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, irq,
			k3_dma_int_handler, 0, DRIVER_NAME, d);
	if (ret)
		return ret;

	d->irq = irq;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
					LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kcalloc(&op->dev,
		d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
	if (d->phy == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct k3_dma_phy *p;

		if (!(d->dma_channel_mask & BIT(i)))
			continue;

		p = &d->phy[i];
		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
	d->slave.device_tx_status = k3_dma_tx_status;
	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = k3_dma_issue_pending;
	d->slave.device_config = k3_dma_config;
	d->slave.device_pause = k3_dma_transfer_pause;
	d->slave.device_resume = k3_dma_transfer_resume;
	d->slave.device_terminate_all = k3_dma_terminate_all;
	d->slave.device_synchronize = k3_dma_synchronize;
	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

	/* init virtual channel */
	d->chans = devm_kcalloc(&op->dev,
		d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
	if (d->chans == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct k3_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = k3_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	k3_dma_enable_dma(d, true);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto dma_async_register_fail;

	ret = of_dma_controller_register((&op->dev)->of_node,
					k3_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	tasklet_setup(&d->task, k3_dma_tasklet);
	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");

	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
dma_async_register_fail:
	clk_disable_unprepare(d->clk);
	return ret;
}

static void k3_dma_remove(struct platform_device *op)
{
	struct k3_dma_chan *c, *cn;
	struct k3_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
	tasklet_kill(&d->task);
	clk_disable_unprepare(d->clk);
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = k3_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			 "chan %d is running, fail to suspend\n", stat);
		return -1;
	}
	k3_dma_enable_dma(d, false);
	clk_disable_unprepare(d->clk);
	return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	k3_dma_enable_dma(d, true);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &k3_dma_pmops,
		.of_match_table = k3_pdma_dt_ids,
	},
	.probe		= k3_dma_probe,
	.remove_new	= k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");