/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from the MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11
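/*
 * Note: HIDMA_MSI_INTS is the number of platform MSI vectors requested in
 * hidma_request_msi() below; the per-vector fan-out into the low-level
 * layer happens in hidma_chirq_handler_msi().
 */
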
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

enum hidma_cap {
	HIDMA_MSI_CAP = 1,
	HIDMA_IDENTITY_CAP,
};

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

		spin_lock_irqsave(&mchan->lock, irqflags);
		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

static void hidma_issue_task(struct tasklet_struct *t)
{
	struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

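/*
 * Decide whether @cookie lies in the completed window. Cookies increase
 * monotonically but wrap around, so the window between @last_success and
 * @last_used has to be tested in two pieces depending on whether the
 * counter has wrapped.
 */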
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

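/*
 * Allocate nr_descriptors software descriptors for this channel and
 * register each with the low-level layer. Returns 1 on success (the
 * dmaengine core treats any positive value as the descriptor count) or a
 * negative error code after undoing the partial allocation.
 */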
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

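/*
 * device_prep_dma_memcpy hook: take a software descriptor off the free
 * list, program source/destination/length into its hardware TRE and park
 * it on the prepared list until tx_submit() queues it.
 */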
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags,
				     HIDMA_TRE_MEMCPY);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

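/*
 * Typical dmaengine client usage of hidma_prep_dma_memcpy() above (a
 * hedged sketch, not part of this driver; assumes the client already
 * holds DMA-mapped buffers dst and src of size len):
 *
 *	chan = dma_request_chan(dev, "rx");
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	status = dma_sync_wait(chan, cookie);
 */
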
static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     value, dest, len, flags,
				     HIDMA_TRE_MEMSET);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

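/*
 * Flush every descriptor the channel owns: give completed work a chance
 * to drain, pause the hardware, fail all outstanding requests back to
 * their owners, then re-enable the channel for new work.
 */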
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

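/*
 * device_pause/device_resume hooks. Pausing disables the hardware channel
 * while leaving the software descriptor lists intact; hidma_tx_status()
 * reports DMA_PAUSED for the transaction that was running at the time.
 */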
static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct hidma_dev *mdev = dev_get_drvdata(dev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

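/*
 * Callback invoked by the platform MSI layer for each allocated vector.
 * Only the first vector's address/data pair is written to the event
 * channel registers; the remaining vectors appear to be derived from this
 * base by the hardware.
 */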
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts above */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}

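/*
 * Allocate HIDMA_MSI_INTS platform MSI vectors and attach the MSI
 * interrupt handler to each. On any failure the vectors requested so far
 * are released and the caller falls back to the wired IRQ.
 */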
static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}

static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
	enum hidma_cap cap;

	cap = (enum hidma_cap) device_get_match_data(dev);
	return cap ? ((cap & test_cap) > 0) : 0;
}

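/*
 * Probe sequence: enable runtime PM, map the transfer (TRCA) and event
 * (EVCA) register regions, register the dmaengine device and hook up
 * either MSI or the wired channel interrupt depending on the platform
 * capability flags.
 */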
static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
		dmadev->chidx = readl(dmadev->dev_trca + 0x40);
	else
		dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EINVAL;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_setup(&dmadev->task, hidma_issue_task);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062", HIDMA_MSI_CAP},
	{"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
	{.compatible = "qcom,hidma-1.2",
	 .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
	},
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");