1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Ericsson AB 2007-2008
4 * Copyright (C) ST-Ericsson SA 2008-2010
5 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
6 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
9 #include <linux/dma-mapping.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/export.h>
13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/log2.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/err.h>
22 #include <linux/of_dma.h>
23 #include <linux/amba/bus.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/platform_data/dma-ste-dma40.h>
27 #include "dmaengine.h"
28 #include "ste_dma40_ll.h"
30 #define D40_NAME "dma40"
32 #define D40_PHY_CHAN -1
34 /* For masking out/in 2 bit channel positions */
35 #define D40_CHAN_POS(chan) (2 * (chan / 2))
36 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
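/*
 * Illustrative note (added, not part of the original source): every pair of
 * physical channels shares one 2-bit field, e.g. D40_CHAN_POS(5) = 2 * (5 / 2)
 * = 4 and D40_CHAN_POS_MASK(5) = 0x3 << 4 = 0x30, so channels 4 and 5 both use
 * bits [5:4] -- even channels in one status register, odd ones in its sibling.
 */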
38 /* Maximum iterations taken before giving up suspending a channel */
39 #define D40_SUSPEND_MAX_IT 500
42 #define DMA40_AUTOSUSPEND_DELAY 100
44 /* Hardware requirement on LCLA alignment */
45 #define LCLA_ALIGNMENT 0x40000
47 /* Max number of links per event group */
48 #define D40_LCLA_LINK_PER_EVENT_GRP 128
49 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
51 /* Max number of logical channels per physical channel */
52 #define D40_MAX_LOG_CHAN_PER_PHY 32
54 /* Attempts before giving up on trying to get aligned pages */
55 #define MAX_LCLA_ALLOC_ATTEMPTS 256
57 /* Bit markings for allocation map */
58 #define D40_ALLOC_FREE BIT(31)
59 #define D40_ALLOC_PHY BIT(30)
60 #define D40_ALLOC_LOG_FREE 0
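/*
 * Sketch of the encoding (added for illustration): allocated_src/allocated_dst
 * in struct d40_phy_res hold D40_ALLOC_FREE, D40_ALLOC_PHY when the whole
 * physical channel is taken, or a bitmap of logical event lines, e.g. a single
 * logical client on src event line 5 leaves allocated_src == BIT(5).
 */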
62 #define D40_MEMCPY_MAX_CHANS 8
64 /* Reserved event lines for memcpy only. */
65 #define DB8500_DMA_MEMCPY_EV_0 51
66 #define DB8500_DMA_MEMCPY_EV_1 56
67 #define DB8500_DMA_MEMCPY_EV_2 57
68 #define DB8500_DMA_MEMCPY_EV_3 58
69 #define DB8500_DMA_MEMCPY_EV_4 59
70 #define DB8500_DMA_MEMCPY_EV_5 60
72 static int dma40_memcpy_channels[] = {
73 DB8500_DMA_MEMCPY_EV_0,
74 DB8500_DMA_MEMCPY_EV_1,
75 DB8500_DMA_MEMCPY_EV_2,
76 DB8500_DMA_MEMCPY_EV_3,
77 DB8500_DMA_MEMCPY_EV_4,
78 DB8500_DMA_MEMCPY_EV_5,
81 /* Default configuration for physical memcpy */
82 static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
83 .mode = STEDMA40_MODE_PHYSICAL,
84 .dir = DMA_MEM_TO_MEM,
86 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
87 .src_info.psize = STEDMA40_PSIZE_PHY_1,
88 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
90 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
91 .dst_info.psize = STEDMA40_PSIZE_PHY_1,
92 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
95 /* Default configuration for logical memcpy */
96 static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
97 .mode = STEDMA40_MODE_LOGICAL,
98 .dir = DMA_MEM_TO_MEM,
100 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
101 .src_info.psize = STEDMA40_PSIZE_LOG_1,
102 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
104 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
105 .dst_info.psize = STEDMA40_PSIZE_LOG_1,
106 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
110 * enum d40_command - The different commands and/or statuses.
112 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
113 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
114 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
115 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
120 D40_DMA_SUSPEND_REQ = 2,
121 D40_DMA_SUSPENDED = 3
125 * enum d40_events - The different Event Enables for the event lines.
127 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
128 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
129 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
130 * @D40_ROUND_EVENTLINE: Status check for event line.
134 D40_DEACTIVATE_EVENTLINE = 0,
135 D40_ACTIVATE_EVENTLINE = 1,
136 D40_SUSPEND_REQ_EVENTLINE = 2,
137 D40_ROUND_EVENTLINE = 3
141 * These are the registers that have to be saved and later restored
142 * when the DMA hw is powered off.
143 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
145 static __maybe_unused u32 d40_backup_regs[] = {
154 #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
157 * Since 9540 and 8540 have the same HW revision,
158 * use v4a for 9540 or earlier and
159 * use v4b for 8540 or later.
161 * DB8500ed has revision 0
162 * DB8500v1 has revision 2
163 * DB8500v2 has revision 3
164 * AP9540v1 has revision 4
165 * DB8540v1 has revision 4
166 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
168 static u32 d40_backup_regs_v4a[] = {
187 #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
189 static u32 d40_backup_regs_v4b[] = {
212 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
214 static __maybe_unused u32 d40_backup_regs_chan[] = {
225 #define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
226 BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
229 * struct d40_interrupt_lookup - lookup table for interrupt handler
231 * @src: Interrupt mask register.
232 * @clr: Interrupt clear register.
233 * @is_error: true if this is an error interrupt.
234 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
235 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
237 struct d40_interrupt_lookup {
245 static struct d40_interrupt_lookup il_v4a[] = {
246 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
247 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
248 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
249 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
250 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
251 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
252 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
253 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
254 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
255 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
258 static struct d40_interrupt_lookup il_v4b[] = {
259 {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
260 {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
261 {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
262 {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
263 {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
264 {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
265 {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
266 {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
267 {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
268 {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
269 {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
270 {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
274 * struct d40_reg_val - simple lookup struct
276 * @reg: The register.
277 * @val: The value that belongs to the register in reg.
284 static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
285 /* Clock every part of the DMA block from start */
286 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
288 /* Interrupts on all logical channels */
289 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
290 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
291 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
292 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
293 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
294 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
295 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
296 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
297 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
298 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
299 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
300 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
302 static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
303 /* Clock every part of the DMA block from start */
304 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
306 /* Interrupts on all logical channels */
307 { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
308 { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
309 { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
310 { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
311 { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
312 { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
313 { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
314 { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
315 { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
316 { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
317 { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
318 { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
319 { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
320 { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
321 { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
325 * struct d40_lli_pool - Structure for keeping LLIs in memory
327 * @base: Pointer to memory area when the pre_alloc_lli's are not large
328 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
329 * pre_alloc_lli is used.
330 * @dma_addr: DMA address, if mapped
331 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
332 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
333 * one buffer to one buffer.
335 struct d40_lli_pool {
339 /* Space for dst and src, plus an extra for padding */
340 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
344 * struct d40_desc - A descriptor is one DMA job.
346 * @lli_phy: LLI settings for physical channel. Both src and dst
347 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
348 * lli_len equals one.
349 * @lli_log: Same as above but for logical channels.
350 * @lli_pool: The pool with two entries pre-allocated.
351 * @lli_len: Number of llis of current descriptor.
352 * @lli_current: Number of transferred llis.
353 * @lcla_alloc: Number of LCLA entries allocated.
354 * @txd: DMA engine struct. Used, among other things, for communication
357 * @is_in_client_list: true if the client owns this descriptor.
358 * @cyclic: true if this is a cyclic job
360 * This descriptor is used for both logical and physical transfers.
364 struct d40_phy_lli_bidir lli_phy;
366 struct d40_log_lli_bidir lli_log;
368 struct d40_lli_pool lli_pool;
373 struct dma_async_tx_descriptor txd;
374 struct list_head node;
376 bool is_in_client_list;
381 * struct d40_lcla_pool - LCLA pool settings and data.
383 * @base: The virtual address of LCLA, aligned to an 18-bit (0x40000) boundary.
384 * @dma_addr: DMA address, if mapped
385 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
386 * This pointer is only there for clean-up on error.
387 * @pages: The number of pages needed for all physical channels.
388 * Only used later for clean-up on error
389 * @lock: Lock to protect the content in this struct.
390 * @alloc_map: Big map of which LCLA entry is owned by which job.
392 struct d40_lcla_pool {
395 void *base_unaligned;
398 struct d40_desc **alloc_map;
402 * struct d40_phy_res - struct for handling eventlines mapped to physical
405 * @lock: A lock protecting this entity.
406 * @reserved: True if used by secure world or otherwise.
407 * @num: The physical channel number of this entity.
408 * @allocated_src: Bit mapped to show which src event lines are mapped to
409 * this physical channel. Can also be free or physically allocated.
410 * @allocated_dst: Same as for src but for dst.
411 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
413 * @use_soft_lli: Marks whether the channel's linked lists are managed by SW.
427 * struct d40_chan - Struct that describes a channel.
429 * @lock: A spinlock to protect this struct.
430 * @log_num: The logical number, if any, of this channel.
431 * @pending_tx: The number of pending transfers. Used between interrupt handler
433 * @busy: Set to true when transfer is ongoing on this channel.
434 * @phy_chan: Pointer to physical channel which this instance runs on. If this
435 * pointer is NULL, then the channel is not allocated.
436 * @chan: DMA engine handle.
437 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
438 * transfer and call client callback.
439 * @client: Client owned descriptor list.
440 * @pending_queue: Submitted jobs, to be issued by issue_pending()
441 * @active: Active descriptor.
442 * @done: Completed jobs
443 * @queue: Queued jobs.
444 * @prepare_queue: Prepared jobs.
445 * @dma_cfg: The client configuration of this dma channel.
446 * @slave_config: DMA slave configuration.
447 * @configured: whether the dma_cfg configuration is valid
448 * @base: Pointer to the device instance struct.
449 * @src_def_cfg: Default cfg register setting for src.
450 * @dst_def_cfg: Default cfg register setting for dst.
451 * @log_def: Default logical channel settings.
452 * @lcpa: Pointer to dst and src lcpa settings.
453 * @runtime_addr: runtime configured address.
454 * @runtime_direction: runtime configured direction.
456 * This struct can either "be" a logical or a physical channel.
463 struct d40_phy_res *phy_chan;
464 struct dma_chan chan;
465 struct tasklet_struct tasklet;
466 struct list_head client;
467 struct list_head pending_queue;
468 struct list_head active;
469 struct list_head done;
470 struct list_head queue;
471 struct list_head prepare_queue;
472 struct stedma40_chan_cfg dma_cfg;
473 struct dma_slave_config slave_config;
475 struct d40_base *base;
476 /* Default register configurations */
479 struct d40_def_lcsp log_def;
480 struct d40_log_lli_full *lcpa;
481 /* Runtime reconfiguration */
482 dma_addr_t runtime_addr;
483 enum dma_transfer_direction runtime_direction;
487 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
490 * @backup: the pointer to the registers address array for backup
491 * @backup_size: the size of the registers address array for backup
492 * @realtime_en: the realtime enable register
493 * @realtime_clear: the realtime clear register
494 * @high_prio_en: the high priority enable register
495 * @high_prio_clear: the high priority clear register
496 * @interrupt_en: the interrupt enable register
497 * @interrupt_clear: the interrupt clear register
498 * @il: the pointer to struct d40_interrupt_lookup
499 * @il_size: the size of d40_interrupt_lookup array
500 * @init_reg: the pointer to the struct d40_reg_val
501 * @init_reg_size: the size of d40_reg_val array
503 struct d40_gen_dmac {
512 struct d40_interrupt_lookup *il;
514 struct d40_reg_val *init_reg;
519 * struct d40_base - The big global struct, one for each probe'd instance.
521 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
522 * @execmd_lock: Lock for execute command usage since several channels share
523 * the same physical register.
524 * @dev: The device structure.
525 * @virtbase: The virtual base address of the DMA's registers.
526 * @rev: silicon revision detected.
527 * @clk: Pointer to the DMA clock structure.
528 * @phy_start: Physical memory start of the DMA registers.
529 * @phy_size: Size of the DMA register map.
530 * @irq: The IRQ number.
531 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
533 * @num_phy_chans: The number of physical channels. Read from HW. This
534 * is the number of available channels for this driver, not counting "Secure
535 * mode" allocated physical channels.
536 * @num_log_chans: The number of logical channels. Calculated from
538 * @dma_parms: DMA parameters for the channel
539 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
540 * @dma_slave: dma_device channels that can only do slave transfers.
541 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
542 * @phy_chans: Room for all possible physical channels in system.
543 * @log_chans: Room for all possible logical channels in system.
544 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
545 * to log_chans entries.
546 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
547 * to phy_chans entries.
548 * @plat_data: Pointer to provided platform_data which is the driver
550 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
551 * @phy_res: Vector containing all physical channels.
552 * @lcla_pool: lcla pool settings and data.
553 * @lcpa_base: The virtual mapped address of LCPA.
554 * @phy_lcpa: The physical address of the LCPA.
555 * @lcpa_size: The size of the LCPA area.
556 * @desc_slab: cache for descriptors.
557 * @reg_val_backup: Here the values of some hardware registers are stored
558 * before the DMA is powered off. They are restored when the power is back on.
559 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
561 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
562 * @regs_interrupt: Scratch space for registers during interrupt.
563 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
564 * @gen_dmac: the struct for generic registers values to represent u8500/8540
568 spinlock_t interrupt_lock;
569 spinlock_t execmd_lock;
571 void __iomem *virtbase;
574 phys_addr_t phy_start;
575 resource_size_t phy_size;
577 int num_memcpy_chans;
580 struct device_dma_parameters dma_parms;
581 struct dma_device dma_both;
582 struct dma_device dma_slave;
583 struct dma_device dma_memcpy;
584 struct d40_chan *phy_chans;
585 struct d40_chan *log_chans;
586 struct d40_chan **lookup_log_chans;
587 struct d40_chan **lookup_phy_chans;
588 struct stedma40_platform_data *plat_data;
589 struct regulator *lcpa_regulator;
590 /* Physical half channels */
591 struct d40_phy_res *phy_res;
592 struct d40_lcla_pool lcla_pool;
595 resource_size_t lcpa_size;
596 struct kmem_cache *desc_slab;
597 u32 reg_val_backup[BACKUP_REGS_SZ];
598 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
599 u32 *reg_val_backup_chan;
601 u16 gcc_pwr_off_mask;
602 struct d40_gen_dmac gen_dmac;
605 static struct device *chan2dev(struct d40_chan *d40c)
607 return &d40c->chan.dev->device;
610 static bool chan_is_physical(struct d40_chan *chan)
612 return chan->log_num == D40_PHY_CHAN;
615 static bool chan_is_logical(struct d40_chan *chan)
617 return !chan_is_physical(chan);
620 static void __iomem *chan_base(struct d40_chan *chan)
622 return chan->base->virtbase + D40_DREG_PCBASE +
623 chan->phy_chan->num * D40_DREG_PCDELTA;
626 #define d40_err(dev, format, arg...) \
627 dev_err(dev, "[%s] " format, __func__, ## arg)
629 #define chan_err(d40c, format, arg...) \
630 d40_err(chan2dev(d40c), format, ## arg)
632 static int d40_set_runtime_config_write(struct dma_chan *chan,
633 struct dma_slave_config *config,
634 enum dma_transfer_direction direction);
636 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
639 bool is_log = chan_is_logical(d40c);
644 align = sizeof(struct d40_log_lli);
646 align = sizeof(struct d40_phy_lli);
649 base = d40d->lli_pool.pre_alloc_lli;
650 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
651 d40d->lli_pool.base = NULL;
653 d40d->lli_pool.size = lli_len * 2 * align;
655 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
656 d40d->lli_pool.base = base;
658 if (d40d->lli_pool.base == NULL)
663 d40d->lli_log.src = PTR_ALIGN(base, align);
664 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
666 d40d->lli_pool.dma_addr = 0;
668 d40d->lli_phy.src = PTR_ALIGN(base, align);
669 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
671 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
676 if (dma_mapping_error(d40c->base->dev,
677 d40d->lli_pool.dma_addr)) {
678 kfree(d40d->lli_pool.base);
679 d40d->lli_pool.base = NULL;
680 d40d->lli_pool.dma_addr = 0;
688 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
690 if (d40d->lli_pool.dma_addr)
691 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
692 d40d->lli_pool.size, DMA_TO_DEVICE);
694 kfree(d40d->lli_pool.base);
695 d40d->lli_pool.base = NULL;
696 d40d->lli_pool.size = 0;
697 d40d->lli_log.src = NULL;
698 d40d->lli_log.dst = NULL;
699 d40d->lli_phy.src = NULL;
700 d40d->lli_phy.dst = NULL;
703 static int d40_lcla_alloc_one(struct d40_chan *d40c,
704 struct d40_desc *d40d)
710 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
713 * Allocate both src and dst at the same time; therefore the loop
714 * starts at 1, since 0 can't be used (zero is used as the end marker).
716 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
717 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
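/*
 * Worked example (added, illustrative): each physical channel owns a block of
 * D40_LCLA_LINK_PER_EVENT_GRP (128) entries in alloc_map, so for physical
 * channel 3 and i == 1 this evaluates to idx = 3 * 128 + 1 = 385.
 */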
719 if (!d40c->base->lcla_pool.alloc_map[idx]) {
720 d40c->base->lcla_pool.alloc_map[idx] = d40d;
727 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
732 static int d40_lcla_free_all(struct d40_chan *d40c,
733 struct d40_desc *d40d)
739 if (chan_is_physical(d40c))
742 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
744 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
745 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
747 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
748 d40c->base->lcla_pool.alloc_map[idx] = NULL;
750 if (d40d->lcla_alloc == 0) {
757 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
763 static void d40_desc_remove(struct d40_desc *d40d)
765 list_del(&d40d->node);
768 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
770 struct d40_desc *desc = NULL;
772 if (!list_empty(&d40c->client)) {
776 list_for_each_entry_safe(d, _d, &d40c->client, node) {
777 if (async_tx_test_ack(&d->txd)) {
780 memset(desc, 0, sizeof(*desc));
787 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
790 INIT_LIST_HEAD(&desc->node);
795 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
798 d40_pool_lli_free(d40c, d40d);
799 d40_lcla_free_all(d40c, d40d);
800 kmem_cache_free(d40c->base->desc_slab, d40d);
803 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
805 list_add_tail(&desc->node, &d40c->active);
808 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
810 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
811 struct d40_phy_lli *lli_src = desc->lli_phy.src;
812 void __iomem *base = chan_base(chan);
814 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
815 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
816 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
817 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
819 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
820 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
821 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
822 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
825 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
827 list_add_tail(&desc->node, &d40c->done);
830 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
832 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
833 struct d40_log_lli_bidir *lli = &desc->lli_log;
834 int lli_current = desc->lli_current;
835 int lli_len = desc->lli_len;
836 bool cyclic = desc->cyclic;
837 int curr_lcla = -EINVAL;
839 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
843 * We may have partially running cyclic transfers, in case we didn't get
844 * enough LCLA entries.
846 linkback = cyclic && lli_current == 0;
849 * For linkback, we need one LCLA even with only one link, because we
850 * can't link back to the one in LCPA space
852 if (linkback || (lli_len - lli_current > 1)) {
854 * If the channel is expected to use only soft_lli, don't
855 * allocate an lcla. This is to avoid a HW issue that exists
856 * in some controllers during a peripheral-to-memory transfer
857 * that uses linked lists.
859 if (!(chan->phy_chan->use_soft_lli &&
860 chan->dma_cfg.dir == DMA_DEV_TO_MEM))
861 curr_lcla = d40_lcla_alloc_one(chan, desc);
863 first_lcla = curr_lcla;
867 * For linkback, we normally load the LCPA in the loop since we need to
868 * link it to the second LCLA and not the first. However, if we
869 * couldn't even get a first LCLA, then we have to run in LCPA and
872 if (!linkback || curr_lcla == -EINVAL) {
873 unsigned int flags = 0;
875 if (curr_lcla == -EINVAL)
876 flags |= LLI_TERM_INT;
878 d40_log_lli_lcpa_write(chan->lcpa,
879 &lli->dst[lli_current],
880 &lli->src[lli_current],
889 for (; lli_current < lli_len; lli_current++) {
890 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
892 struct d40_log_lli *lcla = pool->base + lcla_offset;
893 unsigned int flags = 0;
896 if (lli_current + 1 < lli_len)
897 next_lcla = d40_lcla_alloc_one(chan, desc);
899 next_lcla = linkback ? first_lcla : -EINVAL;
901 if (cyclic || next_lcla == -EINVAL)
902 flags |= LLI_TERM_INT;
904 if (linkback && curr_lcla == first_lcla) {
905 /* First link goes in both LCPA and LCLA */
906 d40_log_lli_lcpa_write(chan->lcpa,
907 &lli->dst[lli_current],
908 &lli->src[lli_current],
913 * One unused LCLA in the cyclic case if the very first
916 d40_log_lli_lcla_write(lcla,
917 &lli->dst[lli_current],
918 &lli->src[lli_current],
922 * Cache maintenance is not needed if lcla is
925 if (!use_esram_lcla) {
926 dma_sync_single_range_for_device(chan->base->dev,
927 pool->dma_addr, lcla_offset,
928 2 * sizeof(struct d40_log_lli),
931 curr_lcla = next_lcla;
933 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
939 desc->lli_current = lli_current;
942 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
944 if (chan_is_physical(d40c)) {
945 d40_phy_lli_load(d40c, d40d);
946 d40d->lli_current = d40d->lli_len;
948 d40_log_lli_to_lcxa(d40c, d40d);
951 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
953 return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
956 /* remove desc from current queue and add it to the pending_queue */
957 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
959 d40_desc_remove(desc);
960 desc->is_in_client_list = false;
961 list_add_tail(&desc->node, &d40c->pending_queue);
964 static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
966 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
970 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
972 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
975 static struct d40_desc *d40_first_done(struct d40_chan *d40c)
977 return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
980 static int d40_psize_2_burst_size(bool is_log, int psize)
983 if (psize == STEDMA40_PSIZE_LOG_1)
986 if (psize == STEDMA40_PSIZE_PHY_1)
994 * The DMA only supports transferring packets of up to
995 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
997 * Calculate the total number of dma elements required to send the entire sg list.
999 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
1002 u32 max_w = max(data_width1, data_width2);
1003 u32 min_w = min(data_width1, data_width2);
1004 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
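/*
 * Illustrative numbers (added, not from the original source): seg_max is the
 * largest per-LLI byte count, roughly STEDMA40_MAX_SEG_SIZE elements of min_w
 * bytes kept a multiple of max_w. A transfer of 3 * seg_max bytes then needs
 * dmalen = 3, while 3 * seg_max + max_w bytes needs dmalen = 4.
 */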
1006 if (seg_max > STEDMA40_MAX_SEG_SIZE * min_w)
1009 if (!IS_ALIGNED(size, max_w))
1012 if (size <= seg_max)
1015 dmalen = size / seg_max;
1016 if (dmalen * seg_max < size)
1022 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
1023 u32 data_width1, u32 data_width2)
1025 struct scatterlist *sg;
1030 for_each_sg(sgl, sg, sg_len, i) {
1031 ret = d40_size_2_dmalen(sg_dma_len(sg),
1032 data_width1, data_width2);
1040 static int __d40_execute_command_phy(struct d40_chan *d40c,
1041 enum d40_command command)
1045 void __iomem *active_reg;
1047 unsigned long flags;
1050 if (command == D40_DMA_STOP) {
1051 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1056 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1058 if (d40c->phy_chan->num % 2 == 0)
1059 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1061 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1063 if (command == D40_DMA_SUSPEND_REQ) {
1064 status = (readl(active_reg) &
1065 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1066 D40_CHAN_POS(d40c->phy_chan->num);
1068 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1072 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1073 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1076 if (command == D40_DMA_SUSPEND_REQ) {
1078 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1079 status = (readl(active_reg) &
1080 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1081 D40_CHAN_POS(d40c->phy_chan->num);
1085 * Reduce the number of bus accesses while
1086 * waiting for the DMA to suspend.
1090 if (status == D40_DMA_STOP ||
1091 status == D40_DMA_SUSPENDED)
1095 if (i == D40_SUSPEND_MAX_IT) {
1097 "unable to suspend the chl %d (log: %d) status %x\n",
1098 d40c->phy_chan->num, d40c->log_num,
1106 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1110 static void d40_term_all(struct d40_chan *d40c)
1112 struct d40_desc *d40d;
1113 struct d40_desc *_d;
1115 /* Release completed descriptors */
1116 while ((d40d = d40_first_done(d40c))) {
1117 d40_desc_remove(d40d);
1118 d40_desc_free(d40c, d40d);
1121 /* Release active descriptors */
1122 while ((d40d = d40_first_active_get(d40c))) {
1123 d40_desc_remove(d40d);
1124 d40_desc_free(d40c, d40d);
1127 /* Release queued descriptors waiting for transfer */
1128 while ((d40d = d40_first_queued(d40c))) {
1129 d40_desc_remove(d40d);
1130 d40_desc_free(d40c, d40d);
1133 /* Release pending descriptors */
1134 while ((d40d = d40_first_pending(d40c))) {
1135 d40_desc_remove(d40d);
1136 d40_desc_free(d40c, d40d);
1139 /* Release client owned descriptors */
1140 if (!list_empty(&d40c->client))
1141 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1142 d40_desc_remove(d40d);
1143 d40_desc_free(d40c, d40d);
1146 /* Release descriptors in prepare queue */
1147 if (!list_empty(&d40c->prepare_queue))
1148 list_for_each_entry_safe(d40d, _d,
1149 &d40c->prepare_queue, node) {
1150 d40_desc_remove(d40d);
1151 d40_desc_free(d40c, d40d);
1154 d40c->pending_tx = 0;
1157 static void __d40_config_set_event(struct d40_chan *d40c,
1158 enum d40_events event_type, u32 event,
1161 void __iomem *addr = chan_base(d40c) + reg;
1165 switch (event_type) {
1167 case D40_DEACTIVATE_EVENTLINE:
1169 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1170 | ~D40_EVENTLINE_MASK(event), addr);
1173 case D40_SUSPEND_REQ_EVENTLINE:
1174 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1175 D40_EVENTLINE_POS(event);
1177 if (status == D40_DEACTIVATE_EVENTLINE ||
1178 status == D40_SUSPEND_REQ_EVENTLINE)
1181 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1182 | ~D40_EVENTLINE_MASK(event), addr);
1184 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1186 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1187 D40_EVENTLINE_POS(event);
1191 * Reduce the number of bus accesses while
1192 * waiting for the DMA to suspend.
1196 if (status == D40_DEACTIVATE_EVENTLINE)
1200 if (tries == D40_SUSPEND_MAX_IT) {
1202 "unable to stop the event_line chl %d (log: %d)"
1203 "status %x\n", d40c->phy_chan->num,
1204 d40c->log_num, status);
1208 case D40_ACTIVATE_EVENTLINE:
1210 * The hardware sometimes doesn't register the enable when src and dst
1211 * event lines are active on the same logical channel. Retry to ensure
1212 * it does. Usually only one retry is sufficient.
1216 writel((D40_ACTIVATE_EVENTLINE <<
1217 D40_EVENTLINE_POS(event)) |
1218 ~D40_EVENTLINE_MASK(event), addr);
1220 if (readl(addr) & D40_EVENTLINE_MASK(event))
1225 dev_dbg(chan2dev(d40c),
1226 "[%s] workaround enable S%cLNK (%d tries)\n",
1227 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1233 case D40_ROUND_EVENTLINE:
1240 static void d40_config_set_event(struct d40_chan *d40c,
1241 enum d40_events event_type)
1243 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1245 /* Enable event line connected to device (or memcpy) */
1246 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1247 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1248 __d40_config_set_event(d40c, event_type, event,
1249 D40_CHAN_REG_SSLNK);
1251 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1252 __d40_config_set_event(d40c, event_type, event,
1253 D40_CHAN_REG_SDLNK);
1256 static u32 d40_chan_has_events(struct d40_chan *d40c)
1258 void __iomem *chanbase = chan_base(d40c);
1261 val = readl(chanbase + D40_CHAN_REG_SSLNK);
1262 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1268 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1270 unsigned long flags;
1273 void __iomem *active_reg;
1275 if (d40c->phy_chan->num % 2 == 0)
1276 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1278 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1281 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1285 case D40_DMA_SUSPEND_REQ:
1287 active_status = (readl(active_reg) &
1288 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1289 D40_CHAN_POS(d40c->phy_chan->num);
1291 if (active_status == D40_DMA_RUN)
1292 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1294 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1296 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1297 ret = __d40_execute_command_phy(d40c, command);
1303 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1304 ret = __d40_execute_command_phy(d40c, command);
1307 case D40_DMA_SUSPENDED:
1312 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1316 static int d40_channel_execute_command(struct d40_chan *d40c,
1317 enum d40_command command)
1319 if (chan_is_logical(d40c))
1320 return __d40_execute_command_log(d40c, command);
1322 return __d40_execute_command_phy(d40c, command);
1325 static u32 d40_get_prmo(struct d40_chan *d40c)
1327 static const unsigned int phy_map[] = {
1328 [STEDMA40_PCHAN_BASIC_MODE]
1329 = D40_DREG_PRMO_PCHAN_BASIC,
1330 [STEDMA40_PCHAN_MODULO_MODE]
1331 = D40_DREG_PRMO_PCHAN_MODULO,
1332 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
1333 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1335 static const unsigned int log_map[] = {
1336 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1337 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1338 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1339 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1340 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1341 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1344 if (chan_is_physical(d40c))
1345 return phy_map[d40c->dma_cfg.mode_opt];
1347 return log_map[d40c->dma_cfg.mode_opt];
1350 static void d40_config_write(struct d40_chan *d40c)
1355 /* Odd addresses are even addresses + 4 */
1356 addr_base = (d40c->phy_chan->num % 2) * 4;
1357 /* Setup channel mode to logical or physical */
1358 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1359 D40_CHAN_POS(d40c->phy_chan->num);
1360 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1362 /* Setup operational mode option register */
1363 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1365 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1367 if (chan_is_logical(d40c)) {
1368 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1369 & D40_SREG_ELEM_LOG_LIDX_MASK;
1370 void __iomem *chanbase = chan_base(d40c);
1372 /* Set default config for CFG reg */
1373 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1374 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1376 /* Set LIDX for lcla */
1377 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1378 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1380 /* Clear LNK which will be used by d40_chan_has_events() */
1381 writel(0, chanbase + D40_CHAN_REG_SSLNK);
1382 writel(0, chanbase + D40_CHAN_REG_SDLNK);
1386 static u32 d40_residue(struct d40_chan *d40c)
1390 if (chan_is_logical(d40c))
1391 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1392 >> D40_MEM_LCSP2_ECNT_POS;
1394 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1395 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1396 >> D40_SREG_ELEM_PHY_ECNT_POS;
1399 return num_elt * d40c->dma_cfg.dst_info.data_width;
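/*
 * Example (added for illustration): 16 elements still outstanding at a 4-byte
 * destination data width means d40_residue() reports 64 bytes left.
 */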
1402 static bool d40_tx_is_linked(struct d40_chan *d40c)
1406 if (chan_is_logical(d40c))
1407 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1409 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1410 & D40_SREG_LNK_PHYS_LNK_MASK;
1415 static int d40_pause(struct dma_chan *chan)
1417 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1419 unsigned long flags;
1421 if (d40c->phy_chan == NULL) {
1422 chan_err(d40c, "Channel is not allocated!\n");
1429 spin_lock_irqsave(&d40c->lock, flags);
1430 pm_runtime_get_sync(d40c->base->dev);
1432 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1434 pm_runtime_mark_last_busy(d40c->base->dev);
1435 pm_runtime_put_autosuspend(d40c->base->dev);
1436 spin_unlock_irqrestore(&d40c->lock, flags);
1440 static int d40_resume(struct dma_chan *chan)
1442 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1444 unsigned long flags;
1446 if (d40c->phy_chan == NULL) {
1447 chan_err(d40c, "Channel is not allocated!\n");
1454 spin_lock_irqsave(&d40c->lock, flags);
1455 pm_runtime_get_sync(d40c->base->dev);
1457 /* If there are bytes left to transfer or a linked tx, resume the job */
1458 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1459 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1461 pm_runtime_mark_last_busy(d40c->base->dev);
1462 pm_runtime_put_autosuspend(d40c->base->dev);
1463 spin_unlock_irqrestore(&d40c->lock, flags);
1467 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1469 struct d40_chan *d40c = container_of(tx->chan,
1472 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1473 unsigned long flags;
1474 dma_cookie_t cookie;
1476 spin_lock_irqsave(&d40c->lock, flags);
1477 cookie = dma_cookie_assign(tx);
1478 d40_desc_queue(d40c, d40d);
1479 spin_unlock_irqrestore(&d40c->lock, flags);
1484 static int d40_start(struct d40_chan *d40c)
1486 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1489 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1491 struct d40_desc *d40d;
1494 /* Start queued jobs, if any */
1495 d40d = d40_first_queued(d40c);
1500 pm_runtime_get_sync(d40c->base->dev);
1503 /* Remove from queue */
1504 d40_desc_remove(d40d);
1506 /* Add to active queue */
1507 d40_desc_submit(d40c, d40d);
1509 /* Initiate DMA job */
1510 d40_desc_load(d40c, d40d);
1513 err = d40_start(d40c);
1522 /* called from interrupt context */
1523 static void dma_tc_handle(struct d40_chan *d40c)
1525 struct d40_desc *d40d;
1527 /* Get first active entry from list */
1528 d40d = d40_first_active_get(d40c);
1535 * If this was a partially loaded list, we need to reload
1536 * it, and only when the list is completed. We need to check
1537 * for done because the interrupt will hit for every link,
1538 * not just the last one.
1540 if (d40d->lli_current < d40d->lli_len
1541 && !d40_tx_is_linked(d40c)
1542 && !d40_residue(d40c)) {
1543 d40_lcla_free_all(d40c, d40d);
1544 d40_desc_load(d40c, d40d);
1545 (void) d40_start(d40c);
1547 if (d40d->lli_current == d40d->lli_len)
1548 d40d->lli_current = 0;
1551 d40_lcla_free_all(d40c, d40d);
1553 if (d40d->lli_current < d40d->lli_len) {
1554 d40_desc_load(d40c, d40d);
1556 (void) d40_start(d40c);
1560 if (d40_queue_start(d40c) == NULL) {
1563 pm_runtime_mark_last_busy(d40c->base->dev);
1564 pm_runtime_put_autosuspend(d40c->base->dev);
1567 d40_desc_remove(d40d);
1568 d40_desc_done(d40c, d40d);
1572 tasklet_schedule(&d40c->tasklet);
1576 static void dma_tasklet(unsigned long data)
1578 struct d40_chan *d40c = (struct d40_chan *) data;
1579 struct d40_desc *d40d;
1580 unsigned long flags;
1581 bool callback_active;
1582 struct dmaengine_desc_callback cb;
1584 spin_lock_irqsave(&d40c->lock, flags);
1586 /* Get first entry from the done list */
1587 d40d = d40_first_done(d40c);
1589 /* Check if we have reached here for a cyclic job */
1590 d40d = d40_first_active_get(d40c);
1591 if (d40d == NULL || !d40d->cyclic)
1592 goto check_pending_tx;
1596 dma_cookie_complete(&d40d->txd);
1599 * If terminating a channel, pending_tx is set to zero.
1600 * This prevents any finished active jobs from returning to the client.
1602 if (d40c->pending_tx == 0) {
1603 spin_unlock_irqrestore(&d40c->lock, flags);
1607 /* Callback to client */
1608 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1609 dmaengine_desc_get_callback(&d40d->txd, &cb);
1611 if (!d40d->cyclic) {
1612 if (async_tx_test_ack(&d40d->txd)) {
1613 d40_desc_remove(d40d);
1614 d40_desc_free(d40c, d40d);
1615 } else if (!d40d->is_in_client_list) {
1616 d40_desc_remove(d40d);
1617 d40_lcla_free_all(d40c, d40d);
1618 list_add_tail(&d40d->node, &d40c->client);
1619 d40d->is_in_client_list = true;
1625 if (d40c->pending_tx)
1626 tasklet_schedule(&d40c->tasklet);
1628 spin_unlock_irqrestore(&d40c->lock, flags);
1630 if (callback_active)
1631 dmaengine_desc_callback_invoke(&cb, NULL);
1635 /* Rescue manoeuvre if receiving double interrupts */
1636 if (d40c->pending_tx > 0)
1638 spin_unlock_irqrestore(&d40c->lock, flags);
1641 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1647 struct d40_chan *d40c;
1648 unsigned long flags;
1649 struct d40_base *base = data;
1650 u32 *regs = base->regs_interrupt;
1651 struct d40_interrupt_lookup *il = base->gen_dmac.il;
1652 u32 il_size = base->gen_dmac.il_size;
1654 spin_lock_irqsave(&base->interrupt_lock, flags);
1656 /* Read interrupt status of both logical and physical channels */
1657 for (i = 0; i < il_size; i++)
1658 regs[i] = readl(base->virtbase + il[i].src);
1662 chan = find_next_bit((unsigned long *)regs,
1663 BITS_PER_LONG * il_size, chan + 1);
1665 /* No more set bits found? */
1666 if (chan == BITS_PER_LONG * il_size)
1669 row = chan / BITS_PER_LONG;
1670 idx = chan & (BITS_PER_LONG - 1);
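/*
 * Illustration (added): on a 32-bit build (BITS_PER_LONG == 32), set bit
 * number chan = 70 across the concatenated status words gives row = 2 and
 * idx = 6, i.e. bit 6 of the register described by il[2].
 */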
1672 if (il[row].offset == D40_PHY_CHAN)
1673 d40c = base->lookup_phy_chans[idx];
1675 d40c = base->lookup_log_chans[il[row].offset + idx];
1679 * No error because this can happen if something else
1680 * in the system is using the channel.
1686 writel(BIT(idx), base->virtbase + il[row].clr);
1688 spin_lock(&d40c->lock);
1690 if (!il[row].is_error)
1691 dma_tc_handle(d40c);
1693 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1694 chan, il[row].offset, idx);
1696 spin_unlock(&d40c->lock);
1699 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1704 static int d40_validate_conf(struct d40_chan *d40c,
1705 struct stedma40_chan_cfg *conf)
1708 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1711 chan_err(d40c, "Invalid direction.\n");
1715 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1716 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1717 (conf->dev_type < 0)) {
1718 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1722 if (conf->dir == DMA_DEV_TO_DEV) {
1724 * DMAC HW supports it. Will be added to this driver,
1725 * in case any dma client requires it.
1727 chan_err(d40c, "periph to periph not supported\n");
1731 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1732 conf->src_info.data_width !=
1733 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1734 conf->dst_info.data_width) {
1736 * The DMAC hardware only supports
1737 * src (burst x width) == dst (burst x width)
1740 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1747 static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1748 bool is_src, int log_event_line, bool is_log,
1751 unsigned long flags;
1752 spin_lock_irqsave(&phy->lock, flags);
1754 *first_user = ((phy->allocated_src | phy->allocated_dst)
1758 /* Physical interrupts are masked per physical full channel */
1759 if (phy->allocated_src == D40_ALLOC_FREE &&
1760 phy->allocated_dst == D40_ALLOC_FREE) {
1761 phy->allocated_dst = D40_ALLOC_PHY;
1762 phy->allocated_src = D40_ALLOC_PHY;
1765 goto not_found_unlock;
1768 /* Logical channel */
1770 if (phy->allocated_src == D40_ALLOC_PHY)
1771 goto not_found_unlock;
1773 if (phy->allocated_src == D40_ALLOC_FREE)
1774 phy->allocated_src = D40_ALLOC_LOG_FREE;
1776 if (!(phy->allocated_src & BIT(log_event_line))) {
1777 phy->allocated_src |= BIT(log_event_line);
1780 goto not_found_unlock;
1782 if (phy->allocated_dst == D40_ALLOC_PHY)
1783 goto not_found_unlock;
1785 if (phy->allocated_dst == D40_ALLOC_FREE)
1786 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1788 if (!(phy->allocated_dst & BIT(log_event_line))) {
1789 phy->allocated_dst |= BIT(log_event_line);
1794 spin_unlock_irqrestore(&phy->lock, flags);
1797 spin_unlock_irqrestore(&phy->lock, flags);
1801 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1804 unsigned long flags;
1805 bool is_free = false;
1807 spin_lock_irqsave(&phy->lock, flags);
1808 if (!log_event_line) {
1809 phy->allocated_dst = D40_ALLOC_FREE;
1810 phy->allocated_src = D40_ALLOC_FREE;
1815 /* Logical channel */
1817 phy->allocated_src &= ~BIT(log_event_line);
1818 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1819 phy->allocated_src = D40_ALLOC_FREE;
1821 phy->allocated_dst &= ~BIT(log_event_line);
1822 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1823 phy->allocated_dst = D40_ALLOC_FREE;
1826 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1829 spin_unlock_irqrestore(&phy->lock, flags);
1834 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1836 int dev_type = d40c->dma_cfg.dev_type;
1839 struct d40_phy_res *phys;
1845 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1847 phys = d40c->base->phy_res;
1848 num_phy_chans = d40c->base->num_phy_chans;
1850 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1851 log_num = 2 * dev_type;
1853 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1854 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1855 /* dst event lines are used for logical memcpy */
1856 log_num = 2 * dev_type + 1;
1861 event_group = D40_TYPE_TO_GROUP(dev_type);
1862 event_line = D40_TYPE_TO_EVENT(dev_type);
1865 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1866 /* Find physical half channel */
1867 if (d40c->dma_cfg.use_fixed_channel) {
1868 i = d40c->dma_cfg.phy_channel;
1869 if (d40_alloc_mask_set(&phys[i], is_src,
1874 for (i = 0; i < num_phy_chans; i++) {
1875 if (d40_alloc_mask_set(&phys[i], is_src,
1882 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1883 int phy_num = j + event_group * 2;
1884 for (i = phy_num; i < phy_num + 2; i++) {
1885 if (d40_alloc_mask_set(&phys[i],
1895 d40c->phy_chan = &phys[i];
1896 d40c->log_num = D40_PHY_CHAN;
1902 /* Find logical channel */
1903 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1904 int phy_num = j + event_group * 2;
1906 if (d40c->dma_cfg.use_fixed_channel) {
1907 i = d40c->dma_cfg.phy_channel;
1909 if ((i != phy_num) && (i != phy_num + 1)) {
1910 dev_err(chan2dev(d40c),
1911 "invalid fixed phy channel %d\n", i);
1915 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1916 is_log, first_phy_user))
1919 dev_err(chan2dev(d40c),
1920 "could not allocate fixed phy channel %d\n", i);
1925 * Spread logical channels across all available physical channels
1926 * rather than packing every logical channel at the first available phy
1930 for (i = phy_num; i < phy_num + 2; i++) {
1931 if (d40_alloc_mask_set(&phys[i], is_src,
1937 for (i = phy_num + 1; i >= phy_num; i--) {
1938 if (d40_alloc_mask_set(&phys[i], is_src,
1948 d40c->phy_chan = &phys[i];
1949 d40c->log_num = log_num;
1953 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1955 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1961 static int d40_config_memcpy(struct d40_chan *d40c)
1963 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1965 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1966 d40c->dma_cfg = dma40_memcpy_conf_log;
1967 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1969 d40_log_cfg(&d40c->dma_cfg,
1970 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1972 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1973 dma_has_cap(DMA_SLAVE, cap)) {
1974 d40c->dma_cfg = dma40_memcpy_conf_phy;
1976 /* Generate interrupt at end of transfer or relink. */
1977 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1979 /* Generate interrupt on error. */
1980 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1981 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1984 chan_err(d40c, "No memcpy\n");
1991 static int d40_free_dma(struct d40_chan *d40c)
1995 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1996 struct d40_phy_res *phy = d40c->phy_chan;
1999 /* Terminate all queued and active transfers */
2003 chan_err(d40c, "phy == null\n");
2007 if (phy->allocated_src == D40_ALLOC_FREE &&
2008 phy->allocated_dst == D40_ALLOC_FREE) {
2009 chan_err(d40c, "channel already free\n");
2013 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2014 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2016 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2019 chan_err(d40c, "Unknown direction\n");
2023 pm_runtime_get_sync(d40c->base->dev);
2024 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2026 chan_err(d40c, "stop failed\n");
2027 goto mark_last_busy;
2030 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2032 if (chan_is_logical(d40c))
2033 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2035 d40c->base->lookup_phy_chans[phy->num] = NULL;
2038 pm_runtime_mark_last_busy(d40c->base->dev);
2039 pm_runtime_put_autosuspend(d40c->base->dev);
2043 d40c->phy_chan = NULL;
2044 d40c->configured = false;
2046 pm_runtime_mark_last_busy(d40c->base->dev);
2047 pm_runtime_put_autosuspend(d40c->base->dev);
2051 static bool d40_is_paused(struct d40_chan *d40c)
2053 void __iomem *chanbase = chan_base(d40c);
2054 bool is_paused = false;
2055 unsigned long flags;
2056 void __iomem *active_reg;
2058 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2060 spin_lock_irqsave(&d40c->lock, flags);
2062 if (chan_is_physical(d40c)) {
2063 if (d40c->phy_chan->num % 2 == 0)
2064 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2066 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2068 status = (readl(active_reg) &
2069 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2070 D40_CHAN_POS(d40c->phy_chan->num);
2071 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2076 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2077 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2078 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2079 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2080 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2082 chan_err(d40c, "Unknown direction\n");
2086 status = (status & D40_EVENTLINE_MASK(event)) >>
2087 D40_EVENTLINE_POS(event);
2089 if (status != D40_DMA_RUN)
2092 spin_unlock_irqrestore(&d40c->lock, flags);
2097 static u32 stedma40_residue(struct dma_chan *chan)
2099 struct d40_chan *d40c =
2100 container_of(chan, struct d40_chan, chan);
2102 unsigned long flags;
2104 spin_lock_irqsave(&d40c->lock, flags);
2105 bytes_left = d40_residue(d40c);
2106 spin_unlock_irqrestore(&d40c->lock, flags);
2112 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2113 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2114 unsigned int sg_len, dma_addr_t src_dev_addr,
2115 dma_addr_t dst_dev_addr)
2117 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2118 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2119 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2122 ret = d40_log_sg_to_lli(sg_src, sg_len,
2125 chan->log_def.lcsp1,
2126 src_info->data_width,
2127 dst_info->data_width);
2129 ret = d40_log_sg_to_lli(sg_dst, sg_len,
2132 chan->log_def.lcsp3,
2133 dst_info->data_width,
2134 src_info->data_width);
2136 return ret < 0 ? ret : 0;
2140 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2141 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2142 unsigned int sg_len, dma_addr_t src_dev_addr,
2143 dma_addr_t dst_dev_addr)
2145 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2146 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2147 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2148 unsigned long flags = 0;
2152 flags |= LLI_CYCLIC | LLI_TERM_INT;
2154 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2156 virt_to_phys(desc->lli_phy.src),
2158 src_info, dst_info, flags);
2160 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2162 virt_to_phys(desc->lli_phy.dst),
2164 dst_info, src_info, flags);
2166 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2167 desc->lli_pool.size, DMA_TO_DEVICE);
2169 return ret < 0 ? ret : 0;
2172 static struct d40_desc *
2173 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2174 unsigned int sg_len, unsigned long dma_flags)
2176 struct stedma40_chan_cfg *cfg;
2177 struct d40_desc *desc;
2180 desc = d40_desc_get(chan);
2184 cfg = &chan->dma_cfg;
2185 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2186 cfg->dst_info.data_width);
2187 if (desc->lli_len < 0) {
2188 chan_err(chan, "Unaligned size\n");
2192 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2194 chan_err(chan, "Could not allocate lli\n");
2198 desc->lli_current = 0;
2199 desc->txd.flags = dma_flags;
2200 desc->txd.tx_submit = d40_tx_submit;
2202 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2206 d40_desc_free(chan, desc);
2210 static struct dma_async_tx_descriptor *
2211 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2212 struct scatterlist *sg_dst, unsigned int sg_len,
2213 enum dma_transfer_direction direction, unsigned long dma_flags)
2215 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2216 dma_addr_t src_dev_addr;
2217 dma_addr_t dst_dev_addr;
2218 struct d40_desc *desc;
2219 unsigned long flags;
2222 if (!chan->phy_chan) {
2223 chan_err(chan, "Cannot prepare unallocated channel\n");
2227 d40_set_runtime_config_write(dchan, &chan->slave_config, direction);
2229 spin_lock_irqsave(&chan->lock, flags);
2231 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2235 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2236 desc->cyclic = true;
2240 if (direction == DMA_DEV_TO_MEM)
2241 src_dev_addr = chan->runtime_addr;
2242 else if (direction == DMA_MEM_TO_DEV)
2243 dst_dev_addr = chan->runtime_addr;
2245 if (chan_is_logical(chan))
2246 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2247 sg_len, src_dev_addr, dst_dev_addr);
2249 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2250 sg_len, src_dev_addr, dst_dev_addr);
2253 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2254 chan_is_logical(chan) ? "log" : "phy", ret);
2259 * Add the descriptor to the prepare queue in order to be able
2260 * to free it later in terminate_all
2262 list_add_tail(&desc->node, &chan->prepare_queue);
2264 spin_unlock_irqrestore(&chan->lock, flags);
2268 d40_desc_free(chan, desc);
2270 spin_unlock_irqrestore(&chan->lock, flags);
2274 bool stedma40_filter(struct dma_chan *chan, void *data)
2276 struct stedma40_chan_cfg *info = data;
2277 struct d40_chan *d40c =
2278 container_of(chan, struct d40_chan, chan);
2282 err = d40_validate_conf(d40c, info);
2284 d40c->dma_cfg = *info;
2286 err = d40_config_memcpy(d40c);
2289 d40c->configured = true;
2293 EXPORT_SYMBOL(stedma40_filter);
2295 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2297 bool realtime = d40c->dma_cfg.realtime;
2298 bool highprio = d40c->dma_cfg.high_priority;
2300 u32 event = D40_TYPE_TO_EVENT(dev_type);
2301 u32 group = D40_TYPE_TO_GROUP(dev_type);
2302 u32 bit = BIT(event);
2304 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2306 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2308 * Due to a hardware bug, in some cases a logical channel triggered by
2309 * a high priority destination event line can generate extra packet
2312 * The workaround is to not set the high priority level for the
2313 * destination event lines that trigger logical channels.
2315 if (!src && chan_is_logical(d40c))
2318 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2320 /* Destination event lines are stored in the upper halfword */
2324 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2325 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2328 static void d40_set_prio_realtime(struct d40_chan *d40c)
2330 if (d40c->base->rev < 3)
2333 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2334 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2335 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2337 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2338 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2339 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2342 #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
2343 #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
2344 #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2345 #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2346 #define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1)
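/*
 * Worked example (added, illustrative only): a flags cell of 0x0b (binary
 * 01011) decodes as mode = 1 (physical), dir = 1 (device to memory),
 * big_endian = 0, fixed_chan = 1 and high_prio = 0.
 */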
2348 static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2349 struct of_dma *ofdma)
2351 struct stedma40_chan_cfg cfg;
2355 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2358 dma_cap_set(DMA_SLAVE, cap);
2360 cfg.dev_type = dma_spec->args[0];
2361 flags = dma_spec->args[2];
2363 switch (D40_DT_FLAGS_MODE(flags)) {
2364 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2365 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2368 switch (D40_DT_FLAGS_DIR(flags)) {
2370 cfg.dir = DMA_MEM_TO_DEV;
2371 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2374 cfg.dir = DMA_DEV_TO_MEM;
2375 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2379 if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2380 cfg.phy_channel = dma_spec->args[1];
2381 cfg.use_fixed_channel = true;
2384 if (D40_DT_FLAGS_HIGH_PRIO(flags))
2385 cfg.high_priority = true;
2387 return dma_request_channel(cap, stedma40_filter, &cfg);
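/*
 * Illustrative client binding (example values only): a three-cell specifier
 * such as "dmas = <&dma 13 0 0x2>;" selects event line 13 with no fixed
 * physical channel, the last cell carrying the flag bits decoded above.
 */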
2390 /* DMA ENGINE functions */
2391 static int d40_alloc_chan_resources(struct dma_chan *chan)
2394 unsigned long flags;
2395 struct d40_chan *d40c =
2396 container_of(chan, struct d40_chan, chan);
2398 spin_lock_irqsave(&d40c->lock, flags);
2400 dma_cookie_init(chan);
2402 /* If no DMA configuration is set, use the default (memcpy) configuration */
2403 if (!d40c->configured) {
2404 err = d40_config_memcpy(d40c);
2406 chan_err(d40c, "Failed to configure memcpy channel\n");
2407 goto mark_last_busy;
2411 err = d40_allocate_channel(d40c, &is_free_phy);
2413 chan_err(d40c, "Failed to allocate channel\n");
2414 d40c->configured = false;
2415 goto mark_last_busy;
2418 pm_runtime_get_sync(d40c->base->dev);
2420 d40_set_prio_realtime(d40c);
2422 if (chan_is_logical(d40c)) {
2423 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2424 d40c->lcpa = d40c->base->lcpa_base +
2425 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2427 d40c->lcpa = d40c->base->lcpa_base +
2428 d40c->dma_cfg.dev_type *
2429 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
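/*
 * Each event line owns one LCPA slot; the source parameters sit at the start
 * of the slot and the destination parameters D40_LCPA_CHAN_DST_DELTA bytes
 * further in, so the channel keeps a pointer to the half that matches the
 * device end of the transfer.
 */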
2431 /* Unmask the Global Interrupt Mask. */
2432 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2433 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2436 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2437 chan_is_logical(d40c) ? "logical" : "physical",
2438 d40c->phy_chan->num,
2439 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2443 * Only write channel configuration to the DMA if the physical
2444 * resource is free. In case of multiple logical channels
2445 * on the same physical resource, only the first write is necessary.
2448 d40_config_write(d40c);
2450 pm_runtime_mark_last_busy(d40c->base->dev);
2451 pm_runtime_put_autosuspend(d40c->base->dev);
2452 spin_unlock_irqrestore(&d40c->lock, flags);
2456 static void d40_free_chan_resources(struct dma_chan *chan)
2458 struct d40_chan *d40c =
2459 container_of(chan, struct d40_chan, chan);
2461 unsigned long flags;
2463 if (d40c->phy_chan == NULL) {
2464 chan_err(d40c, "Cannot free unallocated channel\n");
2468 spin_lock_irqsave(&d40c->lock, flags);
2470 err = d40_free_dma(d40c);
2473 chan_err(d40c, "Failed to free channel\n");
2474 spin_unlock_irqrestore(&d40c->lock, flags);
2477 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2481 unsigned long dma_flags)
2483 struct scatterlist dst_sg;
2484 struct scatterlist src_sg;
2486 sg_init_table(&dst_sg, 1);
2487 sg_init_table(&src_sg, 1);
2489 sg_dma_address(&dst_sg) = dst;
2490 sg_dma_address(&src_sg) = src;
2492 sg_dma_len(&dst_sg) = size;
2493 sg_dma_len(&src_sg) = size;
2495 return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
2496 DMA_MEM_TO_MEM, dma_flags);
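/*
 * memcpy is implemented on top of the generic sg path: source and destination
 * are each wrapped in a single-entry scatterlist and handed to d40_prep_sg()
 * as a MEM_TO_MEM job.
 */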
2499 static struct dma_async_tx_descriptor *
2500 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2501 unsigned int sg_len, enum dma_transfer_direction direction,
2502 unsigned long dma_flags, void *context)
2504 if (!is_slave_direction(direction))
2507 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2510 static struct dma_async_tx_descriptor *
2511 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2512 size_t buf_len, size_t period_len,
2513 enum dma_transfer_direction direction, unsigned long flags)
2515 unsigned int periods = buf_len / period_len;
2516 struct dma_async_tx_descriptor *txd;
2517 struct scatterlist *sg;
2520 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2524 for (i = 0; i < periods; i++) {
2525 sg_dma_address(&sg[i]) = dma_addr;
2526 sg_dma_len(&sg[i]) = period_len;
2527 dma_addr += period_len;
2530 sg_chain(sg, periods + 1, sg);
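/*
 * Chaining the extra entry back to the start of the list is what makes
 * d40_prep_sg() flag this descriptor as cyclic (sg_next(last) == first).
 */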
2532 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2533 DMA_PREP_INTERRUPT);
2540 static enum dma_status d40_tx_status(struct dma_chan *chan,
2541 dma_cookie_t cookie,
2542 struct dma_tx_state *txstate)
2544 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2545 enum dma_status ret;
2547 if (d40c->phy_chan == NULL) {
2548 chan_err(d40c, "Cannot read status of unallocated channel\n");
2552 ret = dma_cookie_status(chan, cookie, txstate);
2553 if (ret != DMA_COMPLETE && txstate)
2554 dma_set_residue(txstate, stedma40_residue(chan));
2556 if (d40_is_paused(d40c))
2562 static void d40_issue_pending(struct dma_chan *chan)
2564 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2565 unsigned long flags;
2567 if (d40c->phy_chan == NULL) {
2568 chan_err(d40c, "Channel is not allocated!\n");
2572 spin_lock_irqsave(&d40c->lock, flags);
2574 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2576 /* Busy means that queued jobs are already being processed */
2578 (void) d40_queue_start(d40c);
2580 spin_unlock_irqrestore(&d40c->lock, flags);
2583 static int d40_terminate_all(struct dma_chan *chan)
2585 unsigned long flags;
2586 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2589 if (d40c->phy_chan == NULL) {
2590 chan_err(d40c, "Channel is not allocated!\n");
2594 spin_lock_irqsave(&d40c->lock, flags);
2596 pm_runtime_get_sync(d40c->base->dev);
2597 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2599 chan_err(d40c, "Failed to stop channel\n");
2602 pm_runtime_mark_last_busy(d40c->base->dev);
2603 pm_runtime_put_autosuspend(d40c->base->dev);
2605 pm_runtime_mark_last_busy(d40c->base->dev);
2606 pm_runtime_put_autosuspend(d40c->base->dev);
2610 spin_unlock_irqrestore(&d40c->lock, flags);
2615 dma40_config_to_halfchannel(struct d40_chan *d40c,
2616 struct stedma40_half_channel_info *info,
2621 if (chan_is_logical(d40c)) {
2623 psize = STEDMA40_PSIZE_LOG_16;
2624 else if (maxburst >= 8)
2625 psize = STEDMA40_PSIZE_LOG_8;
2626 else if (maxburst >= 4)
2627 psize = STEDMA40_PSIZE_LOG_4;
2629 psize = STEDMA40_PSIZE_LOG_1;
2632 psize = STEDMA40_PSIZE_PHY_16;
2633 else if (maxburst >= 8)
2634 psize = STEDMA40_PSIZE_PHY_8;
2635 else if (maxburst >= 4)
2636 psize = STEDMA40_PSIZE_PHY_4;
2638 psize = STEDMA40_PSIZE_PHY_1;
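/*
 * maxburst is mapped to the closest supported packet size, rounding down:
 * e.g. maxburst = 10 selects PSIZE_*_8 and maxburst = 3 selects PSIZE_*_1.
 */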
2641 info->psize = psize;
2642 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2647 static int d40_set_runtime_config(struct dma_chan *chan,
2648 struct dma_slave_config *config)
2650 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2652 memcpy(&d40c->slave_config, config, sizeof(*config));
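/*
 * The slave configuration is only cached here; it is written to the hardware
 * by d40_set_runtime_config_write() once a transfer with a known direction
 * is prepared.
 */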
2657 /* Runtime reconfiguration extension */
2658 static int d40_set_runtime_config_write(struct dma_chan *chan,
2659 struct dma_slave_config *config,
2660 enum dma_transfer_direction direction)
2662 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2663 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2664 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2665 dma_addr_t config_addr;
2666 u32 src_maxburst, dst_maxburst;
2669 if (d40c->phy_chan == NULL) {
2670 chan_err(d40c, "Channel is not allocated!\n");
2674 src_addr_width = config->src_addr_width;
2675 src_maxburst = config->src_maxburst;
2676 dst_addr_width = config->dst_addr_width;
2677 dst_maxburst = config->dst_maxburst;
2679 if (direction == DMA_DEV_TO_MEM) {
2680 config_addr = config->src_addr;
2682 if (cfg->dir != DMA_DEV_TO_MEM)
2683 dev_dbg(d40c->base->dev,
2684 "channel was not configured for peripheral "
2685 "to memory transfer (%d) overriding\n",
2687 cfg->dir = DMA_DEV_TO_MEM;
2689 /* Configure the memory side */
2690 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2691 dst_addr_width = src_addr_width;
2692 if (dst_maxburst == 0)
2693 dst_maxburst = src_maxburst;
2695 } else if (direction == DMA_MEM_TO_DEV) {
2696 config_addr = config->dst_addr;
2698 if (cfg->dir != DMA_MEM_TO_DEV)
2699 dev_dbg(d40c->base->dev,
2700 "channel was not configured for memory "
2701 "to peripheral transfer (%d) overriding\n",
2703 cfg->dir = DMA_MEM_TO_DEV;
2705 /* Configure the memory side */
2706 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2707 src_addr_width = dst_addr_width;
2708 if (src_maxburst == 0)
2709 src_maxburst = dst_maxburst;
2711 dev_err(d40c->base->dev,
2712 "unrecognized channel direction %d\n",
2717 if (config_addr <= 0) {
2718 dev_err(d40c->base->dev, "no address supplied\n");
2722 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2723 dev_err(d40c->base->dev,
2724 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2732 if (src_maxburst > 16) {
2734 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2735 } else if (dst_maxburst > 16) {
2737 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
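/*
 * Whichever side exceeds the 16-element burst limit, the other side is
 * rescaled by the bus-width ratio so that both halves still move the same
 * number of bytes per burst.
 */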
2740 /* Only valid widths are: 1, 2, 4 and 8. */
2741 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2742 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2743 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2744 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2745 !is_power_of_2(src_addr_width) ||
2746 !is_power_of_2(dst_addr_width))
2749 cfg->src_info.data_width = src_addr_width;
2750 cfg->dst_info.data_width = dst_addr_width;
2752 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2757 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2762 /* Fill in register values */
2763 if (chan_is_logical(d40c))
2764 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2766 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2768 /* These settings will take precedence later */
2769 d40c->runtime_addr = config_addr;
2770 d40c->runtime_direction = direction;
2771 dev_dbg(d40c->base->dev,
2772 "configured channel %s for %s, data width %d/%d, "
2773 "maxburst %d/%d elements, LE, no flow control\n",
2774 dma_chan_name(chan),
2775 (direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2776 src_addr_width, dst_addr_width,
2777 src_maxburst, dst_maxburst);
2782 /* Initialization functions */
2784 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2785 struct d40_chan *chans, int offset,
2789 struct d40_chan *d40c;
2791 INIT_LIST_HEAD(&dma->channels);
2793 for (i = offset; i < offset + num_chans; i++) {
2796 d40c->chan.device = dma;
2798 spin_lock_init(&d40c->lock);
2800 d40c->log_num = D40_PHY_CHAN;
2802 INIT_LIST_HEAD(&d40c->done);
2803 INIT_LIST_HEAD(&d40c->active);
2804 INIT_LIST_HEAD(&d40c->queue);
2805 INIT_LIST_HEAD(&d40c->pending_queue);
2806 INIT_LIST_HEAD(&d40c->client);
2807 INIT_LIST_HEAD(&d40c->prepare_queue);
2809 tasklet_init(&d40c->tasklet, dma_tasklet,
2810 (unsigned long) d40c);
2812 list_add_tail(&d40c->chan.device_node,
2817 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2819 if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
2820 dev->device_prep_slave_sg = d40_prep_slave_sg;
2821 dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2824 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2825 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2826 dev->directions = BIT(DMA_MEM_TO_MEM);
2828 * This controller can only access addresses at even
2829 * 32-bit boundaries, i.e. with 2^2 (4-byte) alignment.
2831 dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
2834 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2835 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2837 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2838 dev->device_free_chan_resources = d40_free_chan_resources;
2839 dev->device_issue_pending = d40_issue_pending;
2840 dev->device_tx_status = d40_tx_status;
2841 dev->device_config = d40_set_runtime_config;
2842 dev->device_pause = d40_pause;
2843 dev->device_resume = d40_resume;
2844 dev->device_terminate_all = d40_terminate_all;
2845 dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2846 dev->dev = base->dev;
2849 static int __init d40_dmaengine_init(struct d40_base *base,
2850 int num_reserved_chans)
2854 d40_chan_init(base, &base->dma_slave, base->log_chans,
2855 0, base->num_log_chans);
2857 dma_cap_zero(base->dma_slave.cap_mask);
2858 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2859 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2861 d40_ops_init(base, &base->dma_slave);
2863 err = dmaenginem_async_device_register(&base->dma_slave);
2866 d40_err(base->dev, "Failed to register slave channels\n");
2870 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2871 base->num_log_chans, base->num_memcpy_chans);
2873 dma_cap_zero(base->dma_memcpy.cap_mask);
2874 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2876 d40_ops_init(base, &base->dma_memcpy);
2878 err = dmaenginem_async_device_register(&base->dma_memcpy);
2882 "Failed to register memcpy only channels\n");
2886 d40_chan_init(base, &base->dma_both, base->phy_chans,
2887 0, num_reserved_chans);
2889 dma_cap_zero(base->dma_both.cap_mask);
2890 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2891 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2892 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2894 d40_ops_init(base, &base->dma_both);
2895 err = dmaenginem_async_device_register(&base->dma_both);
2899 "Failed to register logical and physical capable channels\n");
2907 /* Suspend/resume functionality */
2908 #ifdef CONFIG_PM_SLEEP
2909 static int dma40_suspend(struct device *dev)
2911 struct d40_base *base = dev_get_drvdata(dev);
2914 ret = pm_runtime_force_suspend(dev);
2918 if (base->lcpa_regulator)
2919 ret = regulator_disable(base->lcpa_regulator);
2923 static int dma40_resume(struct device *dev)
2925 struct d40_base *base = dev_get_drvdata(dev);
2928 if (base->lcpa_regulator) {
2929 ret = regulator_enable(base->lcpa_regulator);
2934 return pm_runtime_force_resume(dev);
2939 static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2940 u32 *regaddr, int num, bool save)
2944 for (i = 0; i < num; i++) {
2945 void __iomem *addr = baseaddr + regaddr[i];
2948 backup[i] = readl_relaxed(addr);
2950 writel_relaxed(backup[i], addr);
2954 static void d40_save_restore_registers(struct d40_base *base, bool save)
2958 /* Save/Restore channel specific registers */
2959 for (i = 0; i < base->num_phy_chans; i++) {
2963 if (base->phy_res[i].reserved)
2966 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2967 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
2969 dma40_backup(addr, &base->reg_val_backup_chan[idx],
2970 d40_backup_regs_chan,
2971 ARRAY_SIZE(d40_backup_regs_chan),
2975 /* Save/Restore global registers */
2976 dma40_backup(base->virtbase, base->reg_val_backup,
2977 d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
2980 /* Save/Restore registers that only exist on DMA40 v3 and later */
2981 if (base->gen_dmac.backup)
2982 dma40_backup(base->virtbase, base->reg_val_backup_v4,
2983 base->gen_dmac.backup,
2984 base->gen_dmac.backup_size,
2988 static int dma40_runtime_suspend(struct device *dev)
2990 struct d40_base *base = dev_get_drvdata(dev);
2992 d40_save_restore_registers(base, true);
2994 /* Don't disable/enable clocks for v1 due to HW bugs */
2996 writel_relaxed(base->gcc_pwr_off_mask,
2997 base->virtbase + D40_DREG_GCC);
3002 static int dma40_runtime_resume(struct device *dev)
3004 struct d40_base *base = dev_get_drvdata(dev);
3006 d40_save_restore_registers(base, false);
3008 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3009 base->virtbase + D40_DREG_GCC);
3014 static const struct dev_pm_ops dma40_pm_ops = {
3015 SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3016 SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3017 dma40_runtime_resume,
3021 /* Initialization functions. */
3023 static int __init d40_phy_res_init(struct d40_base *base)
3026 int num_phy_chans_avail = 0;
3028 int odd_even_bit = -2;
3029 int gcc = D40_DREG_GCC_ENA;
3031 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3032 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3034 for (i = 0; i < base->num_phy_chans; i++) {
3035 base->phy_res[i].num = i;
3036 odd_even_bit += 2 * ((i % 2) == 0);
3037 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3038 /* Mark security only channels as occupied */
3039 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3040 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3041 base->phy_res[i].reserved = true;
3042 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3044 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3049 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3050 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3051 base->phy_res[i].reserved = false;
3052 num_phy_chans_avail++;
3054 spin_lock_init(&base->phy_res[i].lock);
3057 /* Mark disabled channels as occupied */
3058 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3059 int chan = base->plat_data->disabled_channels[i];
3061 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3062 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3063 base->phy_res[chan].reserved = true;
3064 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3066 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3068 num_phy_chans_avail--;
3071 /* Mark soft_lli channels */
3072 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3073 int chan = base->plat_data->soft_lli_chans[i];
3075 base->phy_res[chan].use_soft_lli = true;
3078 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3079 num_phy_chans_avail, base->num_phy_chans);
3081 /* Verify settings extended vs standard */
3082 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3084 for (i = 0; i < base->num_phy_chans; i++) {
3086 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3087 (val[0] & 0x3) != 1)
3089 "[%s] INFO: channel %d is misconfigured (%d)\n",
3090 __func__, i, val[0] & 0x3);
3092 val[0] = val[0] >> 2;
3096 * To keep things simple, enable all clocks initially.
3097 * The clocks will be managed properly later, after channel allocation.
3098 * The clocks for the event lines on which reserved channels exist
3099 * are not managed here.
3101 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3102 base->gcc_pwr_off_mask = gcc;
3104 return num_phy_chans_avail;
3107 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3109 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3111 void __iomem *virtbase;
3112 struct resource *res;
3113 struct d40_base *base;
3116 int num_memcpy_chans;
3117 int clk_ret = -EINVAL;
3123 clk = clk_get(&pdev->dev, NULL);
3125 d40_err(&pdev->dev, "No matching clock found\n");
3126 goto check_prepare_enabled;
3129 clk_ret = clk_prepare_enable(clk);
3131 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3132 goto disable_unprepare;
3135 /* Get IO for DMAC base address */
3136 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3138 goto disable_unprepare;
3140 if (request_mem_region(res->start, resource_size(res),
3141 D40_NAME " I/O base") == NULL)
3142 goto release_region;
3144 virtbase = ioremap(res->start, resource_size(res));
3146 goto release_region;
3148 /* This is just a regular AMBA PrimeCell ID actually */
3149 for (pid = 0, i = 0; i < 4; i++)
3150 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3152 for (cid = 0, i = 0; i < 4; i++)
3153 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
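/*
 * The PrimeCell PID bytes live 0x20 bytes before the end of the region and
 * the CID bytes 0x10 bytes before the end; each 32-bit register contributes
 * one byte of the identifier.
 */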
3156 if (cid != AMBA_CID) {
3157 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3160 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3161 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3162 AMBA_MANF_BITS(pid),
3168 * DB8500ed has revision 0
3170 * DB8500v1 has revision 2
3171 * DB8500v2 has revision 3
3172 * AP9540v1 has revision 4
3173 * DB8540v1 has revision 4
3175 rev = AMBA_REV_BITS(pid);
3177 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
3181 /* The number of physical channels on this HW */
3182 if (plat_data->num_of_phy_chans)
3183 num_phy_chans = plat_data->num_of_phy_chans;
3185 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3187 /* The number of channels used for memcpy */
3188 if (plat_data->num_of_memcpy_chans)
3189 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3191 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3193 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
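/* For example, the DB8500's 8 physical channels yield 8 * 32 = 256 logical channels. */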
3195 dev_info(&pdev->dev,
3196 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3197 rev, &res->start, num_phy_chans, num_log_chans);
3199 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3200 (num_phy_chans + num_log_chans + num_memcpy_chans) *
3201 sizeof(struct d40_chan), GFP_KERNEL);
3208 base->num_memcpy_chans = num_memcpy_chans;
3209 base->num_phy_chans = num_phy_chans;
3210 base->num_log_chans = num_log_chans;
3211 base->phy_start = res->start;
3212 base->phy_size = resource_size(res);
3213 base->virtbase = virtbase;
3214 base->plat_data = plat_data;
3215 base->dev = &pdev->dev;
3216 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3217 base->log_chans = &base->phy_chans[num_phy_chans];
3219 if (base->plat_data->num_of_phy_chans == 14) {
3220 base->gen_dmac.backup = d40_backup_regs_v4b;
3221 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3222 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3223 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3224 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3225 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3226 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3227 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3228 base->gen_dmac.il = il_v4b;
3229 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3230 base->gen_dmac.init_reg = dma_init_reg_v4b;
3231 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3233 if (base->rev >= 3) {
3234 base->gen_dmac.backup = d40_backup_regs_v4a;
3235 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3237 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3238 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3239 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3240 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3241 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3242 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3243 base->gen_dmac.il = il_v4a;
3244 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3245 base->gen_dmac.init_reg = dma_init_reg_v4a;
3246 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3249 base->phy_res = kcalloc(num_phy_chans,
3250 sizeof(*base->phy_res),
3255 base->lookup_phy_chans = kcalloc(num_phy_chans,
3256 sizeof(*base->lookup_phy_chans),
3258 if (!base->lookup_phy_chans)
3261 base->lookup_log_chans = kcalloc(num_log_chans,
3262 sizeof(*base->lookup_log_chans),
3264 if (!base->lookup_log_chans)
3265 goto free_phy_chans;
3267 base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3268 sizeof(d40_backup_regs_chan),
3270 if (!base->reg_val_backup_chan)
3271 goto free_log_chans;
3273 base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3274 * D40_LCLA_LINK_PER_EVENT_GRP,
3275 sizeof(*base->lcla_pool.alloc_map),
3277 if (!base->lcla_pool.alloc_map)
3278 goto free_backup_chan;
3280 base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
3281 sizeof(*base->regs_interrupt),
3283 if (!base->regs_interrupt)
3286 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3287 0, SLAB_HWCACHE_ALIGN,
3289 if (base->desc_slab == NULL)
3295 kfree(base->regs_interrupt);
3297 kfree(base->lcla_pool.alloc_map);
3299 kfree(base->reg_val_backup_chan);
3301 kfree(base->lookup_log_chans);
3303 kfree(base->lookup_phy_chans);
3305 kfree(base->phy_res);
3311 release_mem_region(res->start, resource_size(res));
3312 check_prepare_enabled:
3315 clk_disable_unprepare(clk);
3321 static void __init d40_hw_init(struct d40_base *base)
3325 u32 prmseo[2] = {0, 0};
3326 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3329 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3330 u32 reg_size = base->gen_dmac.init_reg_size;
3332 for (i = 0; i < reg_size; i++)
3333 writel(dma_init_reg[i].val,
3334 base->virtbase + dma_init_reg[i].reg);
3336 /* Configure all our dma channels to default settings */
3337 for (i = 0; i < base->num_phy_chans; i++) {
3339 activeo[i % 2] = activeo[i % 2] << 2;
3341 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3343 activeo[i % 2] |= 3;
3347 /* Enable interrupt # */
3348 pcmis = (pcmis << 1) | 1;
3350 /* Clear interrupt # */
3351 pcicr = (pcicr << 1) | 1;
3353 /* Set channel to physical mode */
3354 prmseo[i % 2] = prmseo[i % 2] << 2;
3359 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3360 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3361 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3362 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3364 /* Write which interrupt to enable */
3365 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3367 /* Write which interrupt to clear */
3368 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3370 /* These are __initdata and cannot be accessed after init */
3371 base->gen_dmac.init_reg = NULL;
3372 base->gen_dmac.init_reg_size = 0;
3375 static int __init d40_lcla_allocate(struct d40_base *base)
3377 struct d40_lcla_pool *pool = &base->lcla_pool;
3378 unsigned long *page_list;
3383 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned;
3384 * to fulfil this hardware requirement without wasting 256 KiB we
3385 * allocate pages until we get an aligned one.
3387 page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3393 /* Calculate how many pages are required */
3394 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
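/*
 * One KiB of LCLA space is needed per physical channel; "pages" holds that
 * total expressed in pages and is passed to __get_free_pages() below as the
 * allocation order.
 */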
3396 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3397 page_list[i] = __get_free_pages(GFP_KERNEL,
3398 base->lcla_pool.pages);
3399 if (!page_list[i]) {
3401 d40_err(base->dev, "Failed to allocate %d pages.\n",
3402 base->lcla_pool.pages);
3405 for (j = 0; j < i; j++)
3406 free_pages(page_list[j], base->lcla_pool.pages);
3407 goto free_page_list;
3410 if ((virt_to_phys((void *)page_list[i]) &
3411 (LCLA_ALIGNMENT - 1)) == 0)
3415 for (j = 0; j < i; j++)
3416 free_pages(page_list[j], base->lcla_pool.pages);
3418 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3419 base->lcla_pool.base = (void *)page_list[i];
3422 * After many attempts with no success in finding the correct
3423 * alignment, fall back to allocating a bigger buffer.
3426 "[%s] Failed to get %d pages @ 18 bit align.\n",
3427 __func__, base->lcla_pool.pages);
3428 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3429 base->num_phy_chans +
3432 if (!base->lcla_pool.base_unaligned) {
3434 goto free_page_list;
3437 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3441 pool->dma_addr = dma_map_single(base->dev, pool->base,
3442 SZ_1K * base->num_phy_chans,
3444 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3447 goto free_page_list;
3450 writel(virt_to_phys(base->lcla_pool.base),
3451 base->virtbase + D40_DREG_LCLA);
3458 static int __init d40_of_probe(struct platform_device *pdev,
3459 struct device_node *np)
3461 struct stedma40_platform_data *pdata;
3462 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3465 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3469 /* If absent this value will be obtained from h/w. */
3470 of_property_read_u32(np, "dma-channels", &num_phy);
3472 pdata->num_of_phy_chans = num_phy;
3474 list = of_get_property(np, "memcpy-channels", &num_memcpy);
3475 num_memcpy /= sizeof(*list);
3477 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3479 "Invalid number of memcpy channels specified (%d)\n",
3483 pdata->num_of_memcpy_chans = num_memcpy;
3485 of_property_read_u32_array(np, "memcpy-channels",
3486 dma40_memcpy_channels,
3489 list = of_get_property(np, "disabled-channels", &num_disabled);
3490 num_disabled /= sizeof(*list);
3492 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3494 "Invalid number of disabled channels specified (%d)\n",
3499 of_property_read_u32_array(np, "disabled-channels",
3500 pdata->disabled_channels,
3502 pdata->disabled_channels[num_disabled] = -1;
3504 pdev->dev.platform_data = pdata;
3509 static int __init d40_probe(struct platform_device *pdev)
3511 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3512 struct device_node *np = pdev->dev.of_node;
3514 struct d40_base *base;
3515 struct resource *res;
3516 int num_reserved_chans;
3521 if (d40_of_probe(pdev, np)) {
3523 goto report_failure;
3526 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3527 goto report_failure;
3531 base = d40_hw_detect_init(pdev);
3533 goto report_failure;
3535 num_reserved_chans = d40_phy_res_init(base);
3537 platform_set_drvdata(pdev, base);
3539 spin_lock_init(&base->interrupt_lock);
3540 spin_lock_init(&base->execmd_lock);
3542 /* Get IO for logical channel parameter address */
3543 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3546 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3549 base->lcpa_size = resource_size(res);
3550 base->phy_lcpa = res->start;
3552 if (request_mem_region(res->start, resource_size(res),
3553 D40_NAME " I/O lcpa") == NULL) {
3555 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3559 /* We make use of ESRAM memory for this. */
3560 val = readl(base->virtbase + D40_DREG_LCPA);
3561 if (res->start != val && val != 0) {
3562 dev_warn(&pdev->dev,
3563 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3564 __func__, val, &res->start);
3566 writel(res->start, base->virtbase + D40_DREG_LCPA);
3568 base->lcpa_base = ioremap(res->start, resource_size(res));
3569 if (!base->lcpa_base) {
3571 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3574 /* If lcla has to be located in ESRAM we don't need to allocate */
3575 if (base->plat_data->use_esram_lcla) {
3576 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3581 "No \"lcla_esram\" memory resource\n");
3584 base->lcla_pool.base = ioremap(res->start,
3585 resource_size(res));
3586 if (!base->lcla_pool.base) {
3588 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3591 writel(res->start, base->virtbase + D40_DREG_LCLA);
3594 ret = d40_lcla_allocate(base);
3596 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3601 spin_lock_init(&base->lcla_pool.lock);
3603 base->irq = platform_get_irq(pdev, 0);
3605 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3607 d40_err(&pdev->dev, "No IRQ defined\n");
3611 if (base->plat_data->use_esram_lcla) {
3613 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3614 if (IS_ERR(base->lcpa_regulator)) {
3615 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3616 ret = PTR_ERR(base->lcpa_regulator);
3617 base->lcpa_regulator = NULL;
3621 ret = regulator_enable(base->lcpa_regulator);
3624 "Failed to enable lcpa_regulator\n");
3625 regulator_put(base->lcpa_regulator);
3626 base->lcpa_regulator = NULL;
3631 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3633 pm_runtime_irq_safe(base->dev);
3634 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3635 pm_runtime_use_autosuspend(base->dev);
3636 pm_runtime_mark_last_busy(base->dev);
3637 pm_runtime_set_active(base->dev);
3638 pm_runtime_enable(base->dev);
3640 ret = d40_dmaengine_init(base, num_reserved_chans);
3644 base->dev->dma_parms = &base->dma_parms;
3645 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3647 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3654 ret = of_dma_controller_register(np, d40_xlate, NULL);
3657 "could not register of_dma_controller\n");
3660 dev_info(base->dev, "initialized\n");
3663 kmem_cache_destroy(base->desc_slab);
3665 iounmap(base->virtbase);
3667 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3668 iounmap(base->lcla_pool.base);
3669 base->lcla_pool.base = NULL;
3672 if (base->lcla_pool.dma_addr)
3673 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3674 SZ_1K * base->num_phy_chans,
3677 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3678 free_pages((unsigned long)base->lcla_pool.base,
3679 base->lcla_pool.pages);
3681 kfree(base->lcla_pool.base_unaligned);
3684 release_mem_region(base->phy_lcpa,
3686 if (base->phy_start)
3687 release_mem_region(base->phy_start,
3690 clk_disable_unprepare(base->clk);
3694 if (base->lcpa_regulator) {
3695 regulator_disable(base->lcpa_regulator);
3696 regulator_put(base->lcpa_regulator);
3699 kfree(base->lcla_pool.alloc_map);
3700 kfree(base->lookup_log_chans);
3701 kfree(base->lookup_phy_chans);
3702 kfree(base->phy_res);
3705 d40_err(&pdev->dev, "probe failed\n");
3709 static const struct of_device_id d40_match[] = {
3710 { .compatible = "stericsson,dma40", },
3714 static struct platform_driver d40_driver = {
3717 .pm = &dma40_pm_ops,
3718 .of_match_table = d40_match,
3722 static int __init stedma40_init(void)
3724 return platform_driver_probe(&d40_driver, d40_probe);
3726 subsys_initcall(stedma40_init);