// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/* Move the descriptor from the allocated to the submitted list */
	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
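
/*
 * Usage sketch (hypothetical driver code, not part of this file): drivers do
 * not call vchan_tx_submit() directly. vchan_tx_prep() in virt-dma.h installs
 * it as the descriptor's tx_submit hook, so a client's dmaengine_submit()
 * lands here:
 *
 *	struct virt_dma_desc *vd = kzalloc(sizeof(*vd), GFP_NOWAIT);
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = vchan_tx_prep(vc, vd, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);	// ends up in vchan_tx_submit()
 */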

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way to release it is to clear the DMA_CTRL_REUSE flag and submit
 * the transfer one last time.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	/* Unlink from whichever channel list currently holds it */
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vd->tx.desc_free(&vd->tx);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
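
/*
 * Usage sketch (hypothetical client code, for illustration): a client that
 * marked a descriptor reusable with dmaengine_desc_set_reuse() eventually
 * releases it with dmaengine_desc_free(), which arrives here via the
 * desc_free hook set by vchan_tx_prep():
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
 *	if (dmaengine_desc_set_reuse(tx))
 *		...channel does not support descriptor reuse...
 *	...resubmit tx as often as needed...
 *	dmaengine_desc_free(tx);	// calls vchan_tx_desc_free()
 */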

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
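
/*
 * Usage sketch (hypothetical "foo" driver, for illustration): drivers
 * typically call vchan_find_desc() from their device_tx_status callback,
 * under vc->lock, to report a full residue for a transfer still sitting
 * on the issued list:
 *
 *	ret = dma_cookie_status(chan, cookie, txstate);
 *	if (ret == DMA_COMPLETE)
 *		return ret;
 *
 *	spin_lock_irqsave(&vc->lock, flags);
 *	vd = vchan_find_desc(vc, cookie);
 *	if (vd)
 *		dma_set_residue(txstate, foo_desc_size(vd));
 *	spin_unlock_irqrestore(&vc->lock, flags);
 */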

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	/* Cyclic descriptor, if any: invoke its period callback without
	 * freeing it; cb is zeroed (a no-op) when there is none. */
	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		vchan_vdesc_fini(vd);

		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
	}
}
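
/*
 * Usage sketch (hypothetical driver interrupt handler, for illustration):
 * completed descriptors reach this tasklet via vchan_cookie_complete(), and
 * cyclic period interrupts via vchan_cyclic_callback(), both declared in
 * virt-dma.h and called with vc->lock held:
 *
 *	spin_lock(&fc->vc.lock);
 *	vd = fc->cur_desc;
 *	if (transfer_done) {
 *		fc->cur_desc = NULL;
 *		vchan_cookie_complete(vd);	// schedules vchan_complete()
 *	} else if (period_done) {
 *		vchan_cyclic_callback(vd);	// sets vc->cyclic and schedules
 *	}
 *	spin_unlock(&fc->vc.lock);
 */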

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		if (dmaengine_desc_test_reuse(&vd->tx)) {
			/* Reusable descriptors return to the allocated list */
			list_move_tail(&vd->node, &vc->desc_allocated);
		} else {
			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
			list_del(&vd->node);
			vd->tx.desc_free(&vd->tx);
		}
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
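
/*
 * Usage sketch (hypothetical "foo" driver, for illustration): the usual
 * caller is a driver's device_terminate_all callback, which collects every
 * pending descriptor with vchan_get_all_descriptors() (from virt-dma.h)
 * under the lock and frees them once the lock is dropped:
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		// ...stop the hardware...
 *		vchan_get_all_descriptors(vc, &head);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		vchan_dma_desc_free_list(vc, &head);
 *		return 0;
 *	}
 */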

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
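
/*
 * Usage sketch (hypothetical "foo" driver probe, for illustration): a driver
 * embeds struct virt_dma_chan in its channel structure, supplies a desc_free
 * callback to release its own descriptor type, and registers each channel:
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *		// ...hardware-specific state...
 *	};
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, &fd->ddev);
 */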

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");