drivers/dma/ste_dma40.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2-bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
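
/*
 * Illustration: each 32-bit status register packs one 2-bit field per
 * channel, and channels 2n and 2n+1 share the same bit position but live
 * in sibling registers (e.g. ACTIVE for even channels, ACTIVO for odd
 * ones, selected by num % 2 further down). For channel 5:
 * D40_CHAN_POS(5) = 2 * (5 / 2) = 4, so D40_CHAN_POS_MASK(5) = 0x30.
 */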

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY 100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts made before giving up on getting aligned pages */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE          BIT(31)
#define D40_ALLOC_PHY           BIT(30)
#define D40_ALLOC_LOG_FREE      0

#define D40_MEMCPY_MAX_CHANS    8

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0  51
#define DB8500_DMA_MEMCPY_EV_1  56
#define DB8500_DMA_MEMCPY_EV_2  57
#define DB8500_DMA_MEMCPY_EV_3  58
#define DB8500_DMA_MEMCPY_EV_4  59
#define DB8500_DMA_MEMCPY_EV_5  60

static int dma40_memcpy_channels[] = {
        DB8500_DMA_MEMCPY_EV_0,
        DB8500_DMA_MEMCPY_EV_1,
        DB8500_DMA_MEMCPY_EV_2,
        DB8500_DMA_MEMCPY_EV_3,
        DB8500_DMA_MEMCPY_EV_4,
        DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
        .mode = STEDMA40_MODE_PHYSICAL,
        .dir = DMA_MEM_TO_MEM,

        .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .src_info.psize = STEDMA40_PSIZE_PHY_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

        .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .dst_info.psize = STEDMA40_PSIZE_PHY_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
        .mode = STEDMA40_MODE_LOGICAL,
        .dir = DMA_MEM_TO_MEM,

        .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .src_info.psize = STEDMA40_PSIZE_LOG_1,
        .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

        .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .dst_info.psize = STEDMA40_PSIZE_LOG_1,
        .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
        D40_DMA_STOP            = 0,
        D40_DMA_RUN             = 1,
        D40_DMA_SUSPEND_REQ     = 2,
        D40_DMA_SUSPENDED       = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */

enum d40_events {
        D40_DEACTIVATE_EVENTLINE        = 0,
        D40_ACTIVATE_EVENTLINE          = 1,
        D40_SUSPEND_REQ_EVENTLINE       = 2,
        D40_ROUND_EVENTLINE             = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static __maybe_unused u32 d40_backup_regs[] = {
        D40_DREG_LCPA,
        D40_DREG_LCLA,
        D40_DREG_PRMSE,
        D40_DREG_PRMSO,
        D40_DREG_PRMOE,
        D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Since the 9540 and 8540 have the same HW revision,
 * use v4a for 9540 or earlier and v4b for 8540 or later.
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
        D40_DREG_PSEG1,
        D40_DREG_PSEG2,
        D40_DREG_PSEG3,
        D40_DREG_PSEG4,
        D40_DREG_PCEG1,
        D40_DREG_PCEG2,
        D40_DREG_PCEG3,
        D40_DREG_PCEG4,
        D40_DREG_RSEG1,
        D40_DREG_RSEG2,
        D40_DREG_RSEG3,
        D40_DREG_RSEG4,
        D40_DREG_RCEG1,
        D40_DREG_RCEG2,
        D40_DREG_RCEG3,
        D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
        D40_DREG_CPSEG1,
        D40_DREG_CPSEG2,
        D40_DREG_CPSEG3,
        D40_DREG_CPSEG4,
        D40_DREG_CPSEG5,
        D40_DREG_CPCEG1,
        D40_DREG_CPCEG2,
        D40_DREG_CPCEG3,
        D40_DREG_CPCEG4,
        D40_DREG_CPCEG5,
        D40_DREG_CRSEG1,
        D40_DREG_CRSEG2,
        D40_DREG_CRSEG3,
        D40_DREG_CRSEG4,
        D40_DREG_CRSEG5,
        D40_DREG_CRCEG1,
        D40_DREG_CRCEG2,
        D40_DREG_CRCEG3,
        D40_DREG_CRCEG4,
        D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static __maybe_unused u32 d40_backup_regs_chan[] = {
        D40_CHAN_REG_SSCFG,
        D40_CHAN_REG_SSELT,
        D40_CHAN_REG_SSPTR,
        D40_CHAN_REG_SSLNK,
        D40_CHAN_REG_SDCFG,
        D40_CHAN_REG_SDELT,
        D40_CHAN_REG_SDPTR,
        D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
                             BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
        u32 src;
        u32 clr;
        bool is_error;
        int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
        {D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
        {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
        {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
        {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
        {D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
        {D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
        {D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
        {D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
        {D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
        {D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
        {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,  0},
        {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
        {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
        {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
        {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
        {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,   0},
        {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,  32},
        {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,  64},
        {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,  96},
        {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
        {D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
        {D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
        unsigned int reg;
        unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
        /* Clock every part of the DMA block from start */
        { .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

        /* Interrupts on all logical channels */
        { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
        /* Clock every part of the DMA block from start */
        { .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

        /* Interrupts on all logical channels */
        { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
        { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
        void    *base;
        int      size;
        dma_addr_t      dma_addr;
        /* Space for dst and src, plus an extra for padding */
        u8       pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
        /* LLI physical */
        struct d40_phy_lli_bidir         lli_phy;
        /* LLI logical */
        struct d40_log_lli_bidir         lli_log;

        struct d40_lli_pool              lli_pool;
        int                              lli_len;
        int                              lli_current;
        int                              lcla_alloc;

        struct dma_async_tx_descriptor   txd;
        struct list_head                 node;

        bool                             is_in_client_list;
        bool                             cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
        void            *base;
        dma_addr_t      dma_addr;
        void            *base_unaligned;
        int              pages;
        spinlock_t       lock;
        struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number.
 * @use_soft_lli: Marks whether the channel's linked lists are managed by SW.
 */
struct d40_phy_res {
        spinlock_t lock;
        bool       reserved;
        int        num;
        u32        allocated_src;
        u32        allocated_dst;
        bool       use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @slave_config: DMA slave configuration.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
        spinlock_t                       lock;
        int                              log_num;
        int                              pending_tx;
        bool                             busy;
        struct d40_phy_res              *phy_chan;
        struct dma_chan                  chan;
        struct tasklet_struct            tasklet;
        struct list_head                 client;
        struct list_head                 pending_queue;
        struct list_head                 active;
        struct list_head                 done;
        struct list_head                 queue;
        struct list_head                 prepare_queue;
        struct stedma40_chan_cfg         dma_cfg;
        struct dma_slave_config          slave_config;
        bool                             configured;
        struct d40_base                 *base;
        /* Default register configurations */
        u32                              src_def_cfg;
        u32                              dst_def_cfg;
        struct d40_def_lcsp              log_def;
        struct d40_log_lli_full         *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t                      runtime_addr;
        enum dma_transfer_direction     runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
        u32                             *backup;
        u32                              backup_size;
        u32                              realtime_en;
        u32                              realtime_clear;
        u32                              high_prio_en;
        u32                              high_prio_clear;
        u32                              interrupt_en;
        u32                              interrupt_clear;
        struct d40_interrupt_lookup     *il;
        u32                              il_size;
        struct d40_reg_val              *init_reg;
        u32                              init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 * transfers).
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @regs_interrupt: Scratch space for registers during interrupt.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @gen_dmac: the struct for generic register values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
        spinlock_t                       interrupt_lock;
        spinlock_t                       execmd_lock;
        struct device                    *dev;
        void __iomem                     *virtbase;
        u8                                rev:4;
        struct clk                       *clk;
        phys_addr_t                       phy_start;
        resource_size_t                   phy_size;
        int                               irq;
        int                               num_memcpy_chans;
        int                               num_phy_chans;
        int                               num_log_chans;
        struct dma_device                 dma_both;
        struct dma_device                 dma_slave;
        struct dma_device                 dma_memcpy;
        struct d40_chan                  *phy_chans;
        struct d40_chan                  *log_chans;
        struct d40_chan                 **lookup_log_chans;
        struct d40_chan                 **lookup_phy_chans;
        struct stedma40_platform_data    *plat_data;
        struct regulator                 *lcpa_regulator;
        /* Physical half channels */
        struct d40_phy_res               *phy_res;
        struct d40_lcla_pool              lcla_pool;
        void                             *lcpa_base;
        dma_addr_t                        phy_lcpa;
        resource_size_t                   lcpa_size;
        struct kmem_cache                *desc_slab;
        u32                               reg_val_backup[BACKUP_REGS_SZ];
        u32                               reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
        u32                              *reg_val_backup_chan;
        u32                              *regs_interrupt;
        u16                               gcc_pwr_off_mask;
        struct d40_gen_dmac               gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
        return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
        return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
        return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
        return chan->base->virtbase + D40_DREG_PCBASE +
               chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)            \
        dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)          \
        d40_err(chan2dev(d40c), format, ## arg)

static int d40_set_runtime_config_write(struct dma_chan *chan,
                                  struct dma_slave_config *config,
                                  enum dma_transfer_direction direction);

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
                              int lli_len)
{
        bool is_log = chan_is_logical(d40c);
        u32 align;
        void *base;

        if (is_log)
                align = sizeof(struct d40_log_lli);
        else
                align = sizeof(struct d40_phy_lli);

        if (lli_len == 1) {
                base = d40d->lli_pool.pre_alloc_lli;
                d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
                d40d->lli_pool.base = NULL;
        } else {
                d40d->lli_pool.size = lli_len * 2 * align;

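                /*
                 * Allocate one extra alignment unit so that PTR_ALIGN
                 * below can round base up without overrunning the buffer.
                 */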
                base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
                d40d->lli_pool.base = base;

                if (d40d->lli_pool.base == NULL)
                        return -ENOMEM;
        }

        if (is_log) {
                d40d->lli_log.src = PTR_ALIGN(base, align);
                d40d->lli_log.dst = d40d->lli_log.src + lli_len;

                d40d->lli_pool.dma_addr = 0;
        } else {
                d40d->lli_phy.src = PTR_ALIGN(base, align);
                d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

                d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
                                                         d40d->lli_phy.src,
                                                         d40d->lli_pool.size,
                                                         DMA_TO_DEVICE);

                if (dma_mapping_error(d40c->base->dev,
                                      d40d->lli_pool.dma_addr)) {
                        kfree(d40d->lli_pool.base);
                        d40d->lli_pool.base = NULL;
                        d40d->lli_pool.dma_addr = 0;
                        return -ENOMEM;
                }
        }

        return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (d40d->lli_pool.dma_addr)
                dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
                                 d40d->lli_pool.size, DMA_TO_DEVICE);

        kfree(d40d->lli_pool.base);
        d40d->lli_pool.base = NULL;
        d40d->lli_pool.size = 0;
        d40d->lli_log.src = NULL;
        d40d->lli_log.dst = NULL;
        d40d->lli_phy.src = NULL;
        d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
                              struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        /*
         * Allocate both src and dst at the same time; therefore the halves
         * start at 1, since 0 can't be used as it is the end marker.
         */
        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

                if (!d40c->base->lcla_pool.alloc_map[idx]) {
                        d40c->base->lcla_pool.alloc_map[idx] = d40d;
                        d40d->lcla_alloc++;
                        ret = i;
                        break;
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
                             struct d40_desc *d40d)
{
        unsigned long flags;
        int i;
        int ret = -EINVAL;

        if (chan_is_physical(d40c))
                return 0;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
                int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

                if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
                        d40c->base->lcla_pool.alloc_map[idx] = NULL;
                        d40d->lcla_alloc--;
                        if (d40d->lcla_alloc == 0) {
                                ret = 0;
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
        list_del(&d40d->node);
}

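/*
 * Reuse an already acked descriptor from the client list when possible;
 * otherwise fall back to a fresh zeroed allocation from the slab cache.
 */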
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
        struct d40_desc *desc = NULL;

        if (!list_empty(&d40c->client)) {
                struct d40_desc *d;
                struct d40_desc *_d;

                list_for_each_entry_safe(d, _d, &d40c->client, node) {
                        if (async_tx_test_ack(&d->txd)) {
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
                                break;
                        }
                }
        }

        if (!desc)
                desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

        if (desc)
                INIT_LIST_HEAD(&desc->node);

        return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        d40_pool_lli_free(d40c, d40d);
        d40_lcla_free_all(d40c, d40d);
        kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
        struct d40_phy_lli *lli_src = desc->lli_phy.src;
        void __iomem *base = chan_base(chan);

        writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
        writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
        writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
        writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

        writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
        writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
        writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
        writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->done);
}

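/*
 * Load a logical channel's LLIs: the first link is written to the channel's
 * slot in LCPA, and any remaining links are chained through LCLA entries.
 * Each physical channel owns its own region of the LCLA array, and entries
 * are allocated pairwise (src + dst) via d40_lcla_alloc_one() above.
 */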
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
        struct d40_lcla_pool *pool = &chan->base->lcla_pool;
        struct d40_log_lli_bidir *lli = &desc->lli_log;
        int lli_current = desc->lli_current;
        int lli_len = desc->lli_len;
        bool cyclic = desc->cyclic;
        int curr_lcla = -EINVAL;
        int first_lcla = 0;
        bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
        bool linkback;

        /*
         * We may have partially running cyclic transfers, in case we didn't
         * get enough LCLA entries.
         */
        linkback = cyclic && lli_current == 0;

        /*
         * For linkback, we need one LCLA even with only one link, because we
         * can't link back to the one in LCPA space
         */
        if (linkback || (lli_len - lli_current > 1)) {
                /*
                 * If the channel is expected to use only soft_lli don't
                 * allocate a lcla. This is to avoid a HW issue that exists
                 * in some controller during a peripheral to memory transfer
                 * that uses linked lists.
                 */
                if (!(chan->phy_chan->use_soft_lli &&
                        chan->dma_cfg.dir == DMA_DEV_TO_MEM))
                        curr_lcla = d40_lcla_alloc_one(chan, desc);

                first_lcla = curr_lcla;
        }

        /*
         * For linkback, we normally load the LCPA in the loop since we need to
         * link it to the second LCLA and not the first.  However, if we
         * couldn't even get a first LCLA, then we have to run in LCPA and
         * reload manually.
         */
        if (!linkback || curr_lcla == -EINVAL) {
                unsigned int flags = 0;

                if (curr_lcla == -EINVAL)
                        flags |= LLI_TERM_INT;

                d40_log_lli_lcpa_write(chan->lcpa,
                                       &lli->dst[lli_current],
                                       &lli->src[lli_current],
                                       curr_lcla,
                                       flags);
                lli_current++;
        }

        if (curr_lcla < 0)
                goto set_current;

        for (; lli_current < lli_len; lli_current++) {
                unsigned int lcla_offset = chan->phy_chan->num * 1024 +
                                           8 * curr_lcla * 2;
                struct d40_log_lli *lcla = pool->base + lcla_offset;
                unsigned int flags = 0;
                int next_lcla;

                if (lli_current + 1 < lli_len)
                        next_lcla = d40_lcla_alloc_one(chan, desc);
                else
                        next_lcla = linkback ? first_lcla : -EINVAL;

                if (cyclic || next_lcla == -EINVAL)
                        flags |= LLI_TERM_INT;

                if (linkback && curr_lcla == first_lcla) {
                        /* First link goes in both LCPA and LCLA */
                        d40_log_lli_lcpa_write(chan->lcpa,
                                               &lli->dst[lli_current],
                                               &lli->src[lli_current],
                                               next_lcla, flags);
                }

                /*
                 * One unused LCLA in the cyclic case if the very first
                 * next_lcla fails...
                 */
                d40_log_lli_lcla_write(lcla,
                                       &lli->dst[lli_current],
                                       &lli->src[lli_current],
                                       next_lcla, flags);

                /*
                 * Cache maintenance is not needed if lcla is
                 * mapped in esram
                 */
                if (!use_esram_lcla) {
                        dma_sync_single_range_for_device(chan->base->dev,
                                                pool->dma_addr, lcla_offset,
                                                2 * sizeof(struct d40_log_lli),
                                                DMA_TO_DEVICE);
                }
                curr_lcla = next_lcla;

                if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
                        lli_current++;
                        break;
                }
        }
 set_current:
        desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (chan_is_physical(d40c)) {
                d40_phy_lli_load(d40c, d40d);
                d40d->lli_current = d40d->lli_len;
        } else
                d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
        return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
        d40_desc_remove(desc);
        desc->is_in_client_list = false;
        list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
        return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
                                        node);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
        return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
        return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
}

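/*
 * The psize field encodes the burst length as a power of two: the burst is
 * 2 << psize, i.e. 2^(psize + 1), elements. A one-element burst has its own
 * encoding and is therefore special-cased below.
 */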
static int d40_psize_2_burst_size(bool is_log, int psize)
{
        if (is_log) {
                if (psize == STEDMA40_PSIZE_LOG_1)
                        return 1;
        } else {
                if (psize == STEDMA40_PSIZE_PHY_1)
                        return 1;
        }

        return 2 << psize;
}

/*
 * The DMA hardware only supports transferring packets of up to
 * STEDMA40_MAX_SEG_SIZE * data_width bytes, where data_width is given in
 * bytes.
 *
 * Calculate the total number of dma elements required to send the entire
 * sg list.
 */
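/*
 * A transfer larger than seg_max is split into ceiling(size / seg_max)
 * elements, so e.g. a (properly aligned) size just above 3 * seg_max
 * yields dmalen == 4; a size that is not a multiple of the wider data
 * width is rejected with -EINVAL.
 */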
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
        int dmalen;
        u32 max_w = max(data_width1, data_width2);
        u32 min_w = min(data_width1, data_width2);
        u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

        if (seg_max > STEDMA40_MAX_SEG_SIZE)
                seg_max -= max_w;

        if (!IS_ALIGNED(size, max_w))
                return -EINVAL;

        if (size <= seg_max)
                dmalen = 1;
        else {
                dmalen = size / seg_max;
                if (dmalen * seg_max < size)
                        dmalen++;
        }
        return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
                           u32 data_width1, u32 data_width2)
{
        struct scatterlist *sg;
        int i;
        int len = 0;
        int ret;

        for_each_sg(sgl, sg, sg_len, i) {
                ret = d40_size_2_dmalen(sg_dma_len(sg),
                                        data_width1, data_width2);
                if (ret < 0)
                        return ret;
                len += ret;
        }
        return len;
}

static int __d40_execute_command_phy(struct d40_chan *d40c,
                                     enum d40_command command)
{
        u32 status;
        int i;
        void __iomem *active_reg;
        int ret = 0;
        unsigned long flags;
        u32 wmask;

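        /* Stopping is done as suspend-then-stop: request a suspend first. */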
        if (command == D40_DMA_STOP) {
                ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
                if (ret)
                        return ret;
        }

        spin_lock_irqsave(&d40c->base->execmd_lock, flags);

        if (d40c->phy_chan->num % 2 == 0)
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
        else
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

        if (command == D40_DMA_SUSPEND_REQ) {
                status = (readl(active_reg) &
                          D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                        D40_CHAN_POS(d40c->phy_chan->num);

                if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
                        goto unlock;
        }

        wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
        writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
               active_reg);

        if (command == D40_DMA_SUSPEND_REQ) {

                for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
                        status = (readl(active_reg) &
                                  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                                D40_CHAN_POS(d40c->phy_chan->num);

                        cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
                        udelay(3);

                        if (status == D40_DMA_STOP ||
                            status == D40_DMA_SUSPENDED)
                                break;
                }

                if (i == D40_SUSPEND_MAX_IT) {
                        chan_err(d40c,
                                "unable to suspend the chl %d (log: %d) status %x\n",
                                d40c->phy_chan->num, d40c->log_num,
                                status);
                        dump_stack();
                        ret = -EBUSY;
                }

        }
 unlock:
        spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
        return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
        struct d40_desc *d40d;
        struct d40_desc *_d;

        /* Release completed descriptors */
        while ((d40d = d40_first_done(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release queued descriptors waiting for transfer */
        while ((d40d = d40_first_queued(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release pending descriptors */
        while ((d40d = d40_first_pending(d40c))) {
                d40_desc_remove(d40d);
                d40_desc_free(d40c, d40d);
        }

        /* Release client owned descriptors */
        if (!list_empty(&d40c->client))
                list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                }

        /* Release descriptors in prepare queue */
        if (!list_empty(&d40c->prepare_queue))
                list_for_each_entry_safe(d40d, _d,
                                         &d40c->prepare_queue, node) {
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                }

        d40c->pending_tx = 0;
}

static void __d40_config_set_event(struct d40_chan *d40c,
                                   enum d40_events event_type, u32 event,
                                   int reg)
{
        void __iomem *addr = chan_base(d40c) + reg;
        int tries;
        u32 status;

        switch (event_type) {

        case D40_DEACTIVATE_EVENTLINE:

                writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);
                break;

        case D40_SUSPEND_REQ_EVENTLINE:
                status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
                          D40_EVENTLINE_POS(event);

                if (status == D40_DEACTIVATE_EVENTLINE ||
                    status == D40_SUSPEND_REQ_EVENTLINE)
                        break;

                writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
                       | ~D40_EVENTLINE_MASK(event), addr);

                for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

                        status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
                                  D40_EVENTLINE_POS(event);

                        cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
                        udelay(3);

                        if (status == D40_DEACTIVATE_EVENTLINE)
                                break;
                }

                if (tries == D40_SUSPEND_MAX_IT) {
                        chan_err(d40c,
                                "unable to stop the event_line chl %d (log: %d) status %x\n",
                                d40c->phy_chan->num, d40c->log_num, status);
                }
                break;

        case D40_ACTIVATE_EVENTLINE:
        /*
         * The hardware sometimes doesn't register the enable when src and dst
         * event lines are active on the same logical channel.  Retry to ensure
         * it does.  Usually only one retry is sufficient.
         */
                tries = 100;
                while (--tries) {
                        writel((D40_ACTIVATE_EVENTLINE <<
                                D40_EVENTLINE_POS(event)) |
                                ~D40_EVENTLINE_MASK(event), addr);

                        if (readl(addr) & D40_EVENTLINE_MASK(event))
                                break;
                }

                if (tries != 99)
                        dev_dbg(chan2dev(d40c),
                                "[%s] workaround enable S%cLNK (%d tries)\n",
                                __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
                                100 - tries);

                WARN_ON(!tries);
                break;

        case D40_ROUND_EVENTLINE:
                BUG();
                break;

        }
}

static void d40_config_set_event(struct d40_chan *d40c,
                                 enum d40_events event_type)
{
        u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

        /* Enable event line connected to device (or memcpy) */
        if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
            (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
                __d40_config_set_event(d40c, event_type, event,
                                       D40_CHAN_REG_SSLNK);

        if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
                __d40_config_set_event(d40c, event_type, event,
                                       D40_CHAN_REG_SDLNK);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
        void __iomem *chanbase = chan_base(d40c);
        u32 val;

        val = readl(chanbase + D40_CHAN_REG_SSLNK);
        val |= readl(chanbase + D40_CHAN_REG_SDLNK);

        return val;
}

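/*
 * Logical channels are started and stopped via their event lines; the
 * underlying physical channel is only issued a STOP once no event line on
 * it remains active (see d40_chan_has_events() above).
 */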
static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
        unsigned long flags;
        int ret = 0;
        u32 active_status;
        void __iomem *active_reg;

        if (d40c->phy_chan->num % 2 == 0)
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
        else
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

        spin_lock_irqsave(&d40c->phy_chan->lock, flags);

        switch (command) {
        case D40_DMA_STOP:
        case D40_DMA_SUSPEND_REQ:

                active_status = (readl(active_reg) &
                                 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                                 D40_CHAN_POS(d40c->phy_chan->num);

                if (active_status == D40_DMA_RUN)
                        d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
                else
                        d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

                if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
                        ret = __d40_execute_command_phy(d40c, command);

                break;

        case D40_DMA_RUN:

                d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
                ret = __d40_execute_command_phy(d40c, command);
                break;

        case D40_DMA_SUSPENDED:
                BUG();
                break;
        }

        spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
        return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
                                       enum d40_command command)
{
        if (chan_is_logical(d40c))
                return __d40_execute_command_log(d40c, command);
        else
                return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
        static const unsigned int phy_map[] = {
                [STEDMA40_PCHAN_BASIC_MODE]
                        = D40_DREG_PRMO_PCHAN_BASIC,
                [STEDMA40_PCHAN_MODULO_MODE]
                        = D40_DREG_PRMO_PCHAN_MODULO,
                [STEDMA40_PCHAN_DOUBLE_DST_MODE]
                        = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
        };
        static const unsigned int log_map[] = {
                [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
                        = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
                [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
                        = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
                [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
                        = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
        };

        if (chan_is_physical(d40c))
                return phy_map[d40c->dma_cfg.mode_opt];
        else
                return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
        u32 addr_base;
        u32 var;

        /* Odd addresses are even addresses + 4 */
        addr_base = (d40c->phy_chan->num % 2) * 4;
        /* Setup channel mode to logical or physical */
        var = ((u32)(chan_is_logical(d40c)) + 1) <<
                D40_CHAN_POS(d40c->phy_chan->num);
        writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

        /* Setup operational mode option register */
        var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

        writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

        if (chan_is_logical(d40c)) {
                int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
                           & D40_SREG_ELEM_LOG_LIDX_MASK;
                void __iomem *chanbase = chan_base(d40c);

                /* Set default config for CFG reg */
                writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
                writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

                /* Set LIDX for lcla */
                writel(lidx, chanbase + D40_CHAN_REG_SSELT);
                writel(lidx, chanbase + D40_CHAN_REG_SDELT);

                /* Clear LNK which will be used by d40_chan_has_events() */
                writel(0, chanbase + D40_CHAN_REG_SSLNK);
                writel(0, chanbase + D40_CHAN_REG_SDLNK);
        }
}

static u32 d40_residue(struct d40_chan *d40c)
{
        u32 num_elt;

        if (chan_is_logical(d40c))
                num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
                        >> D40_MEM_LCSP2_ECNT_POS;
        else {
                u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
                num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
                          >> D40_SREG_ELEM_PHY_ECNT_POS;
        }

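        /*
         * ECNT holds the number of elements left to transfer; scale by the
         * element width to get the residue in bytes.
         */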
1397         return num_elt * d40c->dma_cfg.dst_info.data_width;
1398 }
1399
1400 static bool d40_tx_is_linked(struct d40_chan *d40c)
1401 {
1402         bool is_link;
1403
1404         if (chan_is_logical(d40c))
1405                 is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
1406         else
1407                 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1408                           & D40_SREG_LNK_PHYS_LNK_MASK;
1409
1410         return is_link;
1411 }
1412
1413 static int d40_pause(struct dma_chan *chan)
1414 {
1415         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1416         int res = 0;
1417         unsigned long flags;
1418
1419         if (d40c->phy_chan == NULL) {
1420                 chan_err(d40c, "Channel is not allocated!\n");
1421                 return -EINVAL;
1422         }
1423
1424         if (!d40c->busy)
1425                 return 0;
1426
1427         spin_lock_irqsave(&d40c->lock, flags);
1428         pm_runtime_get_sync(d40c->base->dev);
1429
1430         res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1431
1432         pm_runtime_mark_last_busy(d40c->base->dev);
1433         pm_runtime_put_autosuspend(d40c->base->dev);
1434         spin_unlock_irqrestore(&d40c->lock, flags);
1435         return res;
1436 }
1437
1438 static int d40_resume(struct dma_chan *chan)
1439 {
1440         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1441         int res = 0;
1442         unsigned long flags;
1443
1444         if (d40c->phy_chan == NULL) {
1445                 chan_err(d40c, "Channel is not allocated!\n");
1446                 return -EINVAL;
1447         }
1448
1449         if (!d40c->busy)
1450                 return 0;
1451
1452         spin_lock_irqsave(&d40c->lock, flags);
1453         pm_runtime_get_sync(d40c->base->dev);
1454
1455         /* If there are bytes left to transfer or a linked tx, resume the job */
1456         if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1457                 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1458
1459         pm_runtime_mark_last_busy(d40c->base->dev);
1460         pm_runtime_put_autosuspend(d40c->base->dev);
1461         spin_unlock_irqrestore(&d40c->lock, flags);
1462         return res;
1463 }
1464
1465 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1466 {
1467         struct d40_chan *d40c = container_of(tx->chan,
1468                                              struct d40_chan,
1469                                              chan);
1470         struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1471         unsigned long flags;
1472         dma_cookie_t cookie;
1473
1474         spin_lock_irqsave(&d40c->lock, flags);
1475         cookie = dma_cookie_assign(tx);
1476         d40_desc_queue(d40c, d40d);
1477         spin_unlock_irqrestore(&d40c->lock, flags);
1478
1479         return cookie;
1480 }
1481
1482 static int d40_start(struct d40_chan *d40c)
1483 {
1484         return d40_channel_execute_command(d40c, D40_DMA_RUN);
1485 }
1486
1487 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1488 {
1489         struct d40_desc *d40d;
1490         int err;
1491
1492         /* Start queued jobs, if any */
1493         d40d = d40_first_queued(d40c);
1494
1495         if (d40d != NULL) {
1496                 if (!d40c->busy) {
1497                         d40c->busy = true;
1498                         pm_runtime_get_sync(d40c->base->dev);
1499                 }
1500
1501                 /* Remove from queue */
1502                 d40_desc_remove(d40d);
1503
1504                 /* Add to active queue */
1505                 d40_desc_submit(d40c, d40d);
1506
1507                 /* Initiate DMA job */
1508                 d40_desc_load(d40c, d40d);
1509
1510                 /* Start dma job */
1511                 err = d40_start(d40c);
1512
1513                 if (err)
1514                         return NULL;
1515         }
1516
1517         return d40d;
1518 }
1519
1520 /* called from interrupt context */
1521 static void dma_tc_handle(struct d40_chan *d40c)
1522 {
1523         struct d40_desc *d40d;
1524
1525         /* Get first active entry from list */
1526         d40d = d40_first_active_get(d40c);
1527
1528         if (d40d == NULL)
1529                 return;
1530
1531         if (d40d->cyclic) {
1532                 /*
1533                  * If this was a partially loaded list, we need to reload
1534                  * it, but only once the current set of links has completed.
1535                  * We need to check for done because the interrupt will hit
1536                  * for every link, and not just the last one.
1537                  */
1538                 if (d40d->lli_current < d40d->lli_len
1539                     && !d40_tx_is_linked(d40c)
1540                     && !d40_residue(d40c)) {
1541                         d40_lcla_free_all(d40c, d40d);
1542                         d40_desc_load(d40c, d40d);
1543                         (void) d40_start(d40c);
1544
1545                         if (d40d->lli_current == d40d->lli_len)
1546                                 d40d->lli_current = 0;
1547                 }
1548         } else {
1549                 d40_lcla_free_all(d40c, d40d);
1550
1551                 if (d40d->lli_current < d40d->lli_len) {
1552                         d40_desc_load(d40c, d40d);
1553                         /* Start dma job */
1554                         (void) d40_start(d40c);
1555                         return;
1556                 }
1557
1558                 if (d40_queue_start(d40c) == NULL) {
1559                         d40c->busy = false;
1560
1561                         pm_runtime_mark_last_busy(d40c->base->dev);
1562                         pm_runtime_put_autosuspend(d40c->base->dev);
1563                 }
1564
1565                 d40_desc_remove(d40d);
1566                 d40_desc_done(d40c, d40d);
1567         }
1568
1569         d40c->pending_tx++;
1570         tasklet_schedule(&d40c->tasklet);
1571
1572 }
1573
1574 static void dma_tasklet(struct tasklet_struct *t)
1575 {
1576         struct d40_chan *d40c = from_tasklet(d40c, t, tasklet);
1577         struct d40_desc *d40d;
1578         unsigned long flags;
1579         bool callback_active;
1580         struct dmaengine_desc_callback cb;
1581
1582         spin_lock_irqsave(&d40c->lock, flags);
1583
1584         /* Get first entry from the done list */
1585         d40d = d40_first_done(d40c);
1586         if (d40d == NULL) {
1587         /* Check if we have reached here for a cyclic job */
1588                 d40d = d40_first_active_get(d40c);
1589                 if (d40d == NULL || !d40d->cyclic)
1590                         goto check_pending_tx;
1591         }
1592
1593         if (!d40d->cyclic)
1594                 dma_cookie_complete(&d40d->txd);
1595
1596         /*
1597          * When terminating a channel, pending_tx is set to zero.
1598          * This prevents any finished active jobs from returning to the client.
1599          */
1600         if (d40c->pending_tx == 0) {
1601                 spin_unlock_irqrestore(&d40c->lock, flags);
1602                 return;
1603         }
1604
1605         /* Callback to client */
1606         callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1607         dmaengine_desc_get_callback(&d40d->txd, &cb);
1608
1609         if (!d40d->cyclic) {
1610                 if (async_tx_test_ack(&d40d->txd)) {
1611                         d40_desc_remove(d40d);
1612                         d40_desc_free(d40c, d40d);
1613                 } else if (!d40d->is_in_client_list) {
1614                         d40_desc_remove(d40d);
1615                         d40_lcla_free_all(d40c, d40d);
1616                         list_add_tail(&d40d->node, &d40c->client);
1617                         d40d->is_in_client_list = true;
1618                 }
1619         }
1620
1621         d40c->pending_tx--;
1622
1623         if (d40c->pending_tx)
1624                 tasklet_schedule(&d40c->tasklet);
1625
1626         spin_unlock_irqrestore(&d40c->lock, flags);
1627
1628         if (callback_active)
1629                 dmaengine_desc_callback_invoke(&cb, NULL);
1630
1631         return;
1632  check_pending_tx:
1633         /* Rescue maneuver if receiving double interrupts */
1634         if (d40c->pending_tx > 0)
1635                 d40c->pending_tx--;
1636         spin_unlock_irqrestore(&d40c->lock, flags);
1637 }
1638
1639 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1640 {
1641         int i;
1642         u32 idx;
1643         u32 row;
1644         long chan = -1;
1645         struct d40_chan *d40c;
1646         unsigned long flags;
1647         struct d40_base *base = data;
1648         u32 *regs = base->regs_interrupt;
1649         struct d40_interrupt_lookup *il = base->gen_dmac.il;
1650         u32 il_size = base->gen_dmac.il_size;
1651
1652         spin_lock_irqsave(&base->interrupt_lock, flags);
1653
1654         /* Read interrupt status of both logical and physical channels */
1655         for (i = 0; i < il_size; i++)
1656                 regs[i] = readl(base->virtbase + il[i].src);
1657
1658         for (;;) {
1659
1660                 chan = find_next_bit((unsigned long *)regs,
1661                                      BITS_PER_LONG * il_size, chan + 1);
1662
1663                 /* No more set bits found? */
1664                 if (chan == BITS_PER_LONG * il_size)
1665                         break;
1666
1667                 row = chan / BITS_PER_LONG;
1668                 idx = chan & (BITS_PER_LONG - 1);
1669
1670                 if (il[row].offset == D40_PHY_CHAN)
1671                         d40c = base->lookup_phy_chans[idx];
1672                 else
1673                         d40c = base->lookup_log_chans[il[row].offset + idx];
1674
1675                 if (!d40c) {
1676                         /*
1677                          * No error because this can happen if something else
1678                          * in the system is using the channel.
1679                          */
1680                         continue;
1681                 }
1682
1683                 /* ACK interrupt */
1684                 writel(BIT(idx), base->virtbase + il[row].clr);
1685
1686                 spin_lock(&d40c->lock);
1687
1688                 if (!il[row].is_error)
1689                         dma_tc_handle(d40c);
1690                 else
1691                         d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1692                                 chan, il[row].offset, idx);
1693
1694                 spin_unlock(&d40c->lock);
1695         }
1696
1697         spin_unlock_irqrestore(&base->interrupt_lock, flags);
1698
1699         return IRQ_HANDLED;
1700 }
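
/*
 * Illustrative sketch (standalone, not driver code): the interrupt
 * handler above concatenates all status words and lets find_next_bit()
 * walk them as one bitmap.  A global bit number then splits into the
 * lookup-table row (which status word) and the channel index within
 * that word; 32-bit registers are assumed here for the example.
 */
static void d40_irq_decode_example(unsigned long bitnr,
				   unsigned int *row, unsigned int *idx)
{
	const unsigned int bits_per_word = 32;	/* assumption for the sketch */

	*row = bitnr / bits_per_word;		/* which status word */
	*idx = bitnr & (bits_per_word - 1);	/* bit within that word */
}
/* e.g. global bit 37 -> row 1, idx 5. */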
1701
1702 static int d40_validate_conf(struct d40_chan *d40c,
1703                              struct stedma40_chan_cfg *conf)
1704 {
1705         int res = 0;
1706         bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1707
1708         if (!conf->dir) {
1709                 chan_err(d40c, "Invalid direction.\n");
1710                 res = -EINVAL;
1711         }
1712
1713         if ((is_log && conf->dev_type > d40c->base->num_log_chans)  ||
1714             (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1715             (conf->dev_type < 0)) {
1716                 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1717                 res = -EINVAL;
1718         }
1719
1720         if (conf->dir == DMA_DEV_TO_DEV) {
1721                 /*
1722                  * The DMAC HW supports it, but this driver does not.
1723                  * Support will be added in case any dma client requires it.
1724                  */
1725                 chan_err(d40c, "periph to periph not supported\n");
1726                 res = -EINVAL;
1727         }
1728
1729         if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1730             conf->src_info.data_width !=
1731             d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1732             conf->dst_info.data_width) {
1733                 /*
1734                  * The DMAC hardware only supports
1735                  * src (burst x width) == dst (burst x width)
1736                  */
1737
1738                 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1739                 res = -EINVAL;
1740         }
1741
1742         return res;
1743 }
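
/*
 * Illustrative sketch: the last check above enforces that both half
 * channels move the same number of bytes per burst, i.e.
 * src_burst * src_width == dst_burst * dst_width.  For example an
 * 8-beat burst of 2-byte elements matches a 4-beat burst of 4-byte
 * elements, since both transfer 16 bytes per burst.
 */
static bool d40_burst_bytes_match_example(int src_burst, int src_width,
					  int dst_burst, int dst_width)
{
	return src_burst * src_width == dst_burst * dst_width;
}
/* d40_burst_bytes_match_example(8, 2, 4, 4) -> true (16 == 16). */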
1744
1745 static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1746                                bool is_src, int log_event_line, bool is_log,
1747                                bool *first_user)
1748 {
1749         unsigned long flags;
1750         spin_lock_irqsave(&phy->lock, flags);
1751
1752         *first_user = ((phy->allocated_src | phy->allocated_dst)
1753                         == D40_ALLOC_FREE);
1754
1755         if (!is_log) {
1756                 /* Physical interrupts are masked per physical full channel */
1757                 if (phy->allocated_src == D40_ALLOC_FREE &&
1758                     phy->allocated_dst == D40_ALLOC_FREE) {
1759                         phy->allocated_dst = D40_ALLOC_PHY;
1760                         phy->allocated_src = D40_ALLOC_PHY;
1761                         goto found_unlock;
1762                 } else
1763                         goto not_found_unlock;
1764         }
1765
1766         /* Logical channel */
1767         if (is_src) {
1768                 if (phy->allocated_src == D40_ALLOC_PHY)
1769                         goto not_found_unlock;
1770
1771                 if (phy->allocated_src == D40_ALLOC_FREE)
1772                         phy->allocated_src = D40_ALLOC_LOG_FREE;
1773
1774                 if (!(phy->allocated_src & BIT(log_event_line))) {
1775                         phy->allocated_src |= BIT(log_event_line);
1776                         goto found_unlock;
1777                 } else
1778                         goto not_found_unlock;
1779         } else {
1780                 if (phy->allocated_dst == D40_ALLOC_PHY)
1781                         goto not_found_unlock;
1782
1783                 if (phy->allocated_dst == D40_ALLOC_FREE)
1784                         phy->allocated_dst = D40_ALLOC_LOG_FREE;
1785
1786                 if (!(phy->allocated_dst & BIT(log_event_line))) {
1787                         phy->allocated_dst |= BIT(log_event_line);
1788                         goto found_unlock;
1789                 }
1790         }
1791  not_found_unlock:
1792         spin_unlock_irqrestore(&phy->lock, flags);
1793         return false;
1794  found_unlock:
1795         spin_unlock_irqrestore(&phy->lock, flags);
1796         return true;
1797 }
1798
1799 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1800                                int log_event_line)
1801 {
1802         unsigned long flags;
1803         bool is_free = false;
1804
1805         spin_lock_irqsave(&phy->lock, flags);
1806         if (!log_event_line) {
1807                 phy->allocated_dst = D40_ALLOC_FREE;
1808                 phy->allocated_src = D40_ALLOC_FREE;
1809                 is_free = true;
1810                 goto unlock;
1811         }
1812
1813         /* Logical channel */
1814         if (is_src) {
1815                 phy->allocated_src &= ~BIT(log_event_line);
1816                 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1817                         phy->allocated_src = D40_ALLOC_FREE;
1818         } else {
1819                 phy->allocated_dst &= ~BIT(log_event_line);
1820                 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1821                         phy->allocated_dst = D40_ALLOC_FREE;
1822         }
1823
1824         is_free = ((phy->allocated_src | phy->allocated_dst) ==
1825                    D40_ALLOC_FREE);
1826  unlock:
1827         spin_unlock_irqrestore(&phy->lock, flags);
1828
1829         return is_free;
1830 }
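
/*
 * Illustrative sketch of the allocation-map state machine used by the
 * two helpers above, with the D40_ALLOC_* values from the top of the
 * file: a half channel is either completely free (bit 31), claimed as
 * a whole physical channel (bit 30), or carries one bit per logical
 * event line.  This hypothetical helper mirrors the logical-channel
 * claim in d40_alloc_mask_set(), minus the locking.
 */
static bool d40_alloc_log_example(u32 *alloc, int log_event_line)
{
	if (*alloc == BIT(30))		/* D40_ALLOC_PHY: physically used */
		return false;
	if (*alloc == BIT(31))		/* D40_ALLOC_FREE */
		*alloc = 0;		/* -> D40_ALLOC_LOG_FREE */
	if (*alloc & BIT(log_event_line))
		return false;		/* event line already claimed */
	*alloc |= BIT(log_event_line);	/* claim this event line */
	return true;
}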
1831
1832 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1833 {
1834         int dev_type = d40c->dma_cfg.dev_type;
1835         int event_group;
1836         int event_line;
1837         struct d40_phy_res *phys;
1838         int i;
1839         int j;
1840         int log_num;
1841         int num_phy_chans;
1842         bool is_src;
1843         bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1844
1845         phys = d40c->base->phy_res;
1846         num_phy_chans = d40c->base->num_phy_chans;
1847
1848         if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1849                 log_num = 2 * dev_type;
1850                 is_src = true;
1851         } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1852                    d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1853                 /* dst event lines are used for logical memcpy */
1854                 log_num = 2 * dev_type + 1;
1855                 is_src = false;
1856         } else
1857                 return -EINVAL;
1858
1859         event_group = D40_TYPE_TO_GROUP(dev_type);
1860         event_line = D40_TYPE_TO_EVENT(dev_type);
1861
1862         if (!is_log) {
1863                 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1864                         /* Find physical half channel */
1865                         if (d40c->dma_cfg.use_fixed_channel) {
1866                                 i = d40c->dma_cfg.phy_channel;
1867                                 if (d40_alloc_mask_set(&phys[i], is_src,
1868                                                        0, is_log,
1869                                                        first_phy_user))
1870                                         goto found_phy;
1871                         } else {
1872                                 for (i = 0; i < num_phy_chans; i++) {
1873                                         if (d40_alloc_mask_set(&phys[i], is_src,
1874                                                        0, is_log,
1875                                                        first_phy_user))
1876                                                 goto found_phy;
1877                                 }
1878                         }
1879                 } else
1880                         for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1881                 int phy_num = j + event_group * 2;
1882                                 for (i = phy_num; i < phy_num + 2; i++) {
1883                                         if (d40_alloc_mask_set(&phys[i],
1884                                                                is_src,
1885                                                                0,
1886                                                                is_log,
1887                                                                first_phy_user))
1888                                                 goto found_phy;
1889                                 }
1890                         }
1891                 return -EINVAL;
1892 found_phy:
1893                 d40c->phy_chan = &phys[i];
1894                 d40c->log_num = D40_PHY_CHAN;
1895                 goto out;
1896         }
1897         if (dev_type == -1)
1898                 return -EINVAL;
1899
1900         /* Find logical channel */
1901         for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1902                 int phy_num = j + event_group * 2;
1903
1904                 if (d40c->dma_cfg.use_fixed_channel) {
1905                         i = d40c->dma_cfg.phy_channel;
1906
1907                         if ((i != phy_num) && (i != phy_num + 1)) {
1908                                 dev_err(chan2dev(d40c),
1909                                         "invalid fixed phy channel %d\n", i);
1910                                 return -EINVAL;
1911                         }
1912
1913                         if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1914                                                is_log, first_phy_user))
1915                                 goto found_log;
1916
1917                         dev_err(chan2dev(d40c),
1918                                 "could not allocate fixed phy channel %d\n", i);
1919                         return -EINVAL;
1920                 }
1921
1922                 /*
1923                  * Spread logical channels across all available physical
1924                  * channels rather than packing every logical channel onto
1925                  * the first available physical channel.
1926                  */
1927                 if (is_src) {
1928                         for (i = phy_num; i < phy_num + 2; i++) {
1929                                 if (d40_alloc_mask_set(&phys[i], is_src,
1930                                                        event_line, is_log,
1931                                                        first_phy_user))
1932                                         goto found_log;
1933                         }
1934                 } else {
1935                         for (i = phy_num + 1; i >= phy_num; i--) {
1936                                 if (d40_alloc_mask_set(&phys[i], is_src,
1937                                                        event_line, is_log,
1938                                                        first_phy_user))
1939                                         goto found_log;
1940                         }
1941                 }
1942         }
1943         return -EINVAL;
1944
1945 found_log:
1946         d40c->phy_chan = &phys[i];
1947         d40c->log_num = log_num;
1948 out:
1949
1950         if (is_log)
1951                 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1952         else
1953                 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1954
1955         return 0;
1956
1957 }
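
/*
 * Illustrative sketch: for logical channels, every event group owns a
 * pair of physical channels in each bank of eight, so the candidates
 * for group G are channels G*2 and G*2+1, then G*2+8 and G*2+9, and
 * so on.  Source allocations scan each pair upwards while destination
 * allocations scan downwards, spreading logical channels across both
 * halves of the pair.  This hypothetical helper just lists the
 * candidate order for one direction.
 */
static int d40_log_candidates_example(int num_phy_chans, int event_group,
				      bool is_src, int *cand, int max_cand)
{
	int i, j, n = 0;

	for (j = 0; j < num_phy_chans && n + 2 <= max_cand; j += 8) {
		int phy_num = j + event_group * 2;

		if (is_src)
			for (i = phy_num; i < phy_num + 2; i++)
				cand[n++] = i;	/* scan the pair upwards */
		else
			for (i = phy_num + 1; i >= phy_num; i--)
				cand[n++] = i;	/* scan the pair downwards */
	}
	return n;	/* number of candidates written */
}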
1958
1959 static int d40_config_memcpy(struct d40_chan *d40c)
1960 {
1961         dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1962
1963         if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1964                 d40c->dma_cfg = dma40_memcpy_conf_log;
1965                 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1966
1967                 d40_log_cfg(&d40c->dma_cfg,
1968                             &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1969
1970         } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1971                    dma_has_cap(DMA_SLAVE, cap)) {
1972                 d40c->dma_cfg = dma40_memcpy_conf_phy;
1973
1974                 /* Generate interrupt at end of transfer or relink. */
1975                 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1976
1977                 /* Generate interrupt on error. */
1978                 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1979                 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1980
1981         } else {
1982                 chan_err(d40c, "No memcpy\n");
1983                 return -EINVAL;
1984         }
1985
1986         return 0;
1987 }
1988
1989 static int d40_free_dma(struct d40_chan *d40c)
1990 {
1991
1992         int res = 0;
1993         u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1994         struct d40_phy_res *phy = d40c->phy_chan;
1995         bool is_src;
1996
1997         /* Terminate all queued and active transfers */
1998         d40_term_all(d40c);
1999
2000         if (phy == NULL) {
2001                 chan_err(d40c, "phy == null\n");
2002                 return -EINVAL;
2003         }
2004
2005         if (phy->allocated_src == D40_ALLOC_FREE &&
2006             phy->allocated_dst == D40_ALLOC_FREE) {
2007                 chan_err(d40c, "channel already free\n");
2008                 return -EINVAL;
2009         }
2010
2011         if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2012             d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2013                 is_src = false;
2014         else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2015                 is_src = true;
2016         else {
2017                 chan_err(d40c, "Unknown direction\n");
2018                 return -EINVAL;
2019         }
2020
2021         pm_runtime_get_sync(d40c->base->dev);
2022         res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2023         if (res) {
2024                 chan_err(d40c, "stop failed\n");
2025                 goto mark_last_busy;
2026         }
2027
2028         d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2029
2030         if (chan_is_logical(d40c))
2031                 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2032         else
2033                 d40c->base->lookup_phy_chans[phy->num] = NULL;
2034
2035         if (d40c->busy) {
2036                 pm_runtime_mark_last_busy(d40c->base->dev);
2037                 pm_runtime_put_autosuspend(d40c->base->dev);
2038         }
2039
2040         d40c->busy = false;
2041         d40c->phy_chan = NULL;
2042         d40c->configured = false;
2043  mark_last_busy:
2044         pm_runtime_mark_last_busy(d40c->base->dev);
2045         pm_runtime_put_autosuspend(d40c->base->dev);
2046         return res;
2047 }
2048
2049 static bool d40_is_paused(struct d40_chan *d40c)
2050 {
2051         void __iomem *chanbase = chan_base(d40c);
2052         bool is_paused = false;
2053         unsigned long flags;
2054         void __iomem *active_reg;
2055         u32 status;
2056         u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2057
2058         spin_lock_irqsave(&d40c->lock, flags);
2059
2060         if (chan_is_physical(d40c)) {
2061                 if (d40c->phy_chan->num % 2 == 0)
2062                         active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2063                 else
2064                         active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2065
2066                 status = (readl(active_reg) &
2067                           D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2068                         D40_CHAN_POS(d40c->phy_chan->num);
2069                 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2070                         is_paused = true;
2071                 goto unlock;
2072         }
2073
2074         if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2075             d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2076                 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2077         } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2078                 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2079         } else {
2080                 chan_err(d40c, "Unknown direction\n");
2081                 goto unlock;
2082         }
2083
2084         status = (status & D40_EVENTLINE_MASK(event)) >>
2085                 D40_EVENTLINE_POS(event);
2086
2087         if (status != D40_DMA_RUN)
2088                 is_paused = true;
2089  unlock:
2090         spin_unlock_irqrestore(&d40c->lock, flags);
2091         return is_paused;
2092
2093 }
2094
2095 static u32 stedma40_residue(struct dma_chan *chan)
2096 {
2097         struct d40_chan *d40c =
2098                 container_of(chan, struct d40_chan, chan);
2099         u32 bytes_left;
2100         unsigned long flags;
2101
2102         spin_lock_irqsave(&d40c->lock, flags);
2103         bytes_left = d40_residue(d40c);
2104         spin_unlock_irqrestore(&d40c->lock, flags);
2105
2106         return bytes_left;
2107 }
2108
2109 static int
2110 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2111                 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2112                 unsigned int sg_len, dma_addr_t src_dev_addr,
2113                 dma_addr_t dst_dev_addr)
2114 {
2115         struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2116         struct stedma40_half_channel_info *src_info = &cfg->src_info;
2117         struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2118         int ret;
2119
2120         ret = d40_log_sg_to_lli(sg_src, sg_len, src_dev_addr,
2121                                 desc->lli_log.src, chan->log_def.lcsp1,
2122                                 src_info->data_width,
2123                                 dst_info->data_width);
2124         if (ret < 0)
2125                 return ret;
2126
2127         ret = d40_log_sg_to_lli(sg_dst, sg_len,
2128                                 dst_dev_addr,
2129                                 desc->lli_log.dst,
2130                                 chan->log_def.lcsp3,
2131                                 dst_info->data_width,
2132                                 src_info->data_width);
2133
2134         return ret < 0 ? ret : 0;
2135 }
2136
2137 static int
2138 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2139                 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2140                 unsigned int sg_len, dma_addr_t src_dev_addr,
2141                 dma_addr_t dst_dev_addr)
2142 {
2143         struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2144         struct stedma40_half_channel_info *src_info = &cfg->src_info;
2145         struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2146         unsigned long flags = 0;
2147         int ret;
2148
2149         if (desc->cyclic)
2150                 flags |= LLI_CYCLIC | LLI_TERM_INT;
2151
2152         ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2153                                 desc->lli_phy.src,
2154                                 virt_to_phys(desc->lli_phy.src),
2155                                 chan->src_def_cfg, src_info, dst_info, flags);
2156         if (ret < 0)
2157                 return ret;
2158         ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2159                                 desc->lli_phy.dst,
2160                                 virt_to_phys(desc->lli_phy.dst),
2161                                 chan->dst_def_cfg,
2162                                 dst_info, src_info, flags);
2163
2164         dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2165                                    desc->lli_pool.size, DMA_TO_DEVICE);
2166
2167         return ret < 0 ? ret : 0;
2168 }
2169
2170 static struct d40_desc *
2171 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2172               unsigned int sg_len, unsigned long dma_flags)
2173 {
2174         struct stedma40_chan_cfg *cfg;
2175         struct d40_desc *desc;
2176         int ret;
2177
2178         desc = d40_desc_get(chan);
2179         if (!desc)
2180                 return NULL;
2181
2182         cfg = &chan->dma_cfg;
2183         desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2184                                         cfg->dst_info.data_width);
2185         if (desc->lli_len < 0) {
2186                 chan_err(chan, "Unaligned size\n");
2187                 goto free_desc;
2188         }
2189
2190         ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2191         if (ret < 0) {
2192                 chan_err(chan, "Could not allocate lli\n");
2193                 goto free_desc;
2194         }
2195
2196         desc->lli_current = 0;
2197         desc->txd.flags = dma_flags;
2198         desc->txd.tx_submit = d40_tx_submit;
2199
2200         dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2201
2202         return desc;
2203  free_desc:
2204         d40_desc_free(chan, desc);
2205         return NULL;
2206 }
2207
2208 static struct dma_async_tx_descriptor *
2209 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2210             struct scatterlist *sg_dst, unsigned int sg_len,
2211             enum dma_transfer_direction direction, unsigned long dma_flags)
2212 {
2213         struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2214         dma_addr_t src_dev_addr;
2215         dma_addr_t dst_dev_addr;
2216         struct d40_desc *desc;
2217         unsigned long flags;
2218         int ret;
2219
2220         if (!chan->phy_chan) {
2221                 chan_err(chan, "Cannot prepare unallocated channel\n");
2222                 return NULL;
2223         }
2224
2225         d40_set_runtime_config_write(dchan, &chan->slave_config, direction);
2226
2227         spin_lock_irqsave(&chan->lock, flags);
2228
2229         desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2230         if (desc == NULL)
2231                 goto unlock;
2232
2233         if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2234                 desc->cyclic = true;
2235
2236         src_dev_addr = 0;
2237         dst_dev_addr = 0;
2238         if (direction == DMA_DEV_TO_MEM)
2239                 src_dev_addr = chan->runtime_addr;
2240         else if (direction == DMA_MEM_TO_DEV)
2241                 dst_dev_addr = chan->runtime_addr;
2242
2243         if (chan_is_logical(chan))
2244                 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2245                                       sg_len, src_dev_addr, dst_dev_addr);
2246         else
2247                 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2248                                       sg_len, src_dev_addr, dst_dev_addr);
2249
2250         if (ret) {
2251                 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2252                          chan_is_logical(chan) ? "log" : "phy", ret);
2253                 goto free_desc;
2254         }
2255
2256         /*
2257          * add descriptor to the prepare queue in order to be able
2258          * to free them later in terminate_all
2259          */
2260         list_add_tail(&desc->node, &chan->prepare_queue);
2261
2262         spin_unlock_irqrestore(&chan->lock, flags);
2263
2264         return &desc->txd;
2265  free_desc:
2266         d40_desc_free(chan, desc);
2267  unlock:
2268         spin_unlock_irqrestore(&chan->lock, flags);
2269         return NULL;
2270 }
2271
2272 bool stedma40_filter(struct dma_chan *chan, void *data)
2273 {
2274         struct stedma40_chan_cfg *info = data;
2275         struct d40_chan *d40c =
2276                 container_of(chan, struct d40_chan, chan);
2277         int err;
2278
2279         if (data) {
2280                 err = d40_validate_conf(d40c, info);
2281                 if (!err)
2282                         d40c->dma_cfg = *info;
2283         } else
2284                 err = d40_config_memcpy(d40c);
2285
2286         if (!err)
2287                 d40c->configured = true;
2288
2289         return err == 0;
2290 }
2291 EXPORT_SYMBOL(stedma40_filter);
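
/*
 * Illustrative sketch (hypothetical client code): stedma40_filter() is
 * intended to be handed to dma_request_channel() together with a
 * filled-in stedma40_chan_cfg, or with NULL data to fall back to the
 * memcpy configuration.  The dev_type value 0 below is only a
 * placeholder for a real event line.
 */
static struct dma_chan *d40_request_channel_example(void)
{
	struct stedma40_chan_cfg cfg = {
		.dir = DMA_DEV_TO_MEM,
		.dev_type = 0,		/* assumed event line, placeholder */
		.mode = STEDMA40_MODE_LOGICAL,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, stedma40_filter, &cfg);
}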
2292
2293 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2294 {
2295         bool realtime = d40c->dma_cfg.realtime;
2296         bool highprio = d40c->dma_cfg.high_priority;
2297         u32 rtreg;
2298         u32 event = D40_TYPE_TO_EVENT(dev_type);
2299         u32 group = D40_TYPE_TO_GROUP(dev_type);
2300         u32 bit = BIT(event);
2301         u32 prioreg;
2302         struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2303
2304         rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2305         /*
2306          * Due to a hardware bug, in some cases a logical channel triggered by
2307          * a high priority destination event line can generate extra packet
2308          * transactions.
2309          *
2310          * The workaround is to not set the high priority level for the
2311          * destination event lines that trigger logical channels.
2312          */
2313         if (!src && chan_is_logical(d40c))
2314                 highprio = false;
2315
2316         prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2317
2318         /* Destination event lines are stored in the upper halfword */
2319         if (!src)
2320                 bit <<= 16;
2321
2322         writel(bit, d40c->base->virtbase + prioreg + group * 4);
2323         writel(bit, d40c->base->virtbase + rtreg + group * 4);
2324 }
2325
2326 static void d40_set_prio_realtime(struct d40_chan *d40c)
2327 {
2328         if (d40c->base->rev < 3)
2329                 return;
2330
2331         if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2332             (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2333                 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2334
2335         if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2336             (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2337                 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2338 }
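
/*
 * Illustrative sketch: the realtime and high-priority registers hold
 * one bit per event line, one register per event group (hence the
 * "group * 4" byte stride above), with destination event lines stored
 * in the upper halfword (hence "bit <<= 16").
 */
static u32 d40_prio_rt_bit_example(u32 event, bool is_src)
{
	u32 bit = BIT(event);	/* one bit per event line */

	if (!is_src)
		bit <<= 16;	/* dst lines live in the upper halfword */
	return bit;
}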
2339
2340 #define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
2341 #define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
2342 #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2343 #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2344 #define D40_DT_FLAGS_HIGH_PRIO(flags)  ((flags >> 4) & 0x1)
2345
2346 static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2347                                   struct of_dma *ofdma)
2348 {
2349         struct stedma40_chan_cfg cfg;
2350         dma_cap_mask_t cap;
2351         u32 flags;
2352
2353         memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2354
2355         dma_cap_zero(cap);
2356         dma_cap_set(DMA_SLAVE, cap);
2357
2358         cfg.dev_type = dma_spec->args[0];
2359         flags = dma_spec->args[2];
2360
2361         switch (D40_DT_FLAGS_MODE(flags)) {
2362         case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2363         case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2364         }
2365
2366         switch (D40_DT_FLAGS_DIR(flags)) {
2367         case 0:
2368                 cfg.dir = DMA_MEM_TO_DEV;
2369                 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2370                 break;
2371         case 1:
2372                 cfg.dir = DMA_DEV_TO_MEM;
2373                 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2374                 break;
2375         }
2376
2377         if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2378                 cfg.phy_channel = dma_spec->args[1];
2379                 cfg.use_fixed_channel = true;
2380         }
2381
2382         if (D40_DT_FLAGS_HIGH_PRIO(flags))
2383                 cfg.high_priority = true;
2384
2385         return dma_request_channel(cap, stedma40_filter, &cfg);
2386 }
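
/*
 * Illustrative sketch: the third cell of the DT dma specifier is the
 * flag word decoded by the D40_DT_FLAGS_* macros above.  For example,
 * flags = 0xb (binary 01011) selects physical mode (bit 0), device to
 * memory direction (bit 1) and a fixed physical channel (bit 3), with
 * little-endian data and normal priority.  The struct below exists
 * only for the sketch.
 */
struct d40_dt_flags_example {
	bool physical, dev_to_mem, big_endian, fixed_chan, high_prio;
};

static struct d40_dt_flags_example d40_decode_dt_flags_example(u32 flags)
{
	struct d40_dt_flags_example f = {
		.physical   = D40_DT_FLAGS_MODE(flags),
		.dev_to_mem = D40_DT_FLAGS_DIR(flags),
		.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags),
		.fixed_chan = D40_DT_FLAGS_FIXED_CHAN(flags),
		.high_prio  = D40_DT_FLAGS_HIGH_PRIO(flags),
	};
	return f;
}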
2387
2388 /* DMA ENGINE functions */
2389 static int d40_alloc_chan_resources(struct dma_chan *chan)
2390 {
2391         int err;
2392         unsigned long flags;
2393         struct d40_chan *d40c =
2394                 container_of(chan, struct d40_chan, chan);
2395         bool is_free_phy;
2396         spin_lock_irqsave(&d40c->lock, flags);
2397
2398         dma_cookie_init(chan);
2399
2400         /* If no dma configuration is set, use the default configuration (memcpy) */
2401         if (!d40c->configured) {
2402                 err = d40_config_memcpy(d40c);
2403                 if (err) {
2404                         chan_err(d40c, "Failed to configure memcpy channel\n");
2405                         goto mark_last_busy;
2406                 }
2407         }
2408
2409         err = d40_allocate_channel(d40c, &is_free_phy);
2410         if (err) {
2411                 chan_err(d40c, "Failed to allocate channel\n");
2412                 d40c->configured = false;
2413                 goto mark_last_busy;
2414         }
2415
2416         pm_runtime_get_sync(d40c->base->dev);
2417
2418         d40_set_prio_realtime(d40c);
2419
2420         if (chan_is_logical(d40c)) {
2421                 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2422                         d40c->lcpa = d40c->base->lcpa_base +
2423                                 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2424                 else
2425                         d40c->lcpa = d40c->base->lcpa_base +
2426                                 d40c->dma_cfg.dev_type *
2427                                 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2428
2429                 /* Unmask the Global Interrupt Mask. */
2430                 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2431                 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2432         }
2433
2434         dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2435                  chan_is_logical(d40c) ? "logical" : "physical",
2436                  d40c->phy_chan->num,
2437                  d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2438
2439
2440         /*
2441          * Only write channel configuration to the DMA if the physical
2442          * resource is free. In case of multiple logical channels
2443          * on the same physical resource, only the first write is necessary.
2444          */
2445         if (is_free_phy)
2446                 d40_config_write(d40c);
2447  mark_last_busy:
2448         pm_runtime_mark_last_busy(d40c->base->dev);
2449         pm_runtime_put_autosuspend(d40c->base->dev);
2450         spin_unlock_irqrestore(&d40c->lock, flags);
2451         return err;
2452 }
2453
2454 static void d40_free_chan_resources(struct dma_chan *chan)
2455 {
2456         struct d40_chan *d40c =
2457                 container_of(chan, struct d40_chan, chan);
2458         int err;
2459         unsigned long flags;
2460
2461         if (d40c->phy_chan == NULL) {
2462                 chan_err(d40c, "Cannot free unallocated channel\n");
2463                 return;
2464         }
2465
2466         spin_lock_irqsave(&d40c->lock, flags);
2467
2468         err = d40_free_dma(d40c);
2469
2470         if (err)
2471                 chan_err(d40c, "Failed to free channel\n");
2472         spin_unlock_irqrestore(&d40c->lock, flags);
2473 }
2474
2475 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2476                                                        dma_addr_t dst,
2477                                                        dma_addr_t src,
2478                                                        size_t size,
2479                                                        unsigned long dma_flags)
2480 {
2481         struct scatterlist dst_sg;
2482         struct scatterlist src_sg;
2483
2484         sg_init_table(&dst_sg, 1);
2485         sg_init_table(&src_sg, 1);
2486
2487         sg_dma_address(&dst_sg) = dst;
2488         sg_dma_address(&src_sg) = src;
2489
2490         sg_dma_len(&dst_sg) = size;
2491         sg_dma_len(&src_sg) = size;
2492
2493         return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
2494                            DMA_MEM_TO_MEM, dma_flags);
2495 }
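
/*
 * Illustrative sketch (hypothetical client code): a memcpy issued
 * through the generic dmaengine API lands in d40_prep_memcpy() above,
 * which simply wraps both addresses in single-entry scatterlists.
 */
static int d40_memcpy_example(struct dma_chan *chan,
			      dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* kick the channel */
	return 0;
}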
2496
2497 static struct dma_async_tx_descriptor *
2498 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2499                   unsigned int sg_len, enum dma_transfer_direction direction,
2500                   unsigned long dma_flags, void *context)
2501 {
2502         if (!is_slave_direction(direction))
2503                 return NULL;
2504
2505         return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2506 }
2507
2508 static struct dma_async_tx_descriptor *
2509 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2510                      size_t buf_len, size_t period_len,
2511                      enum dma_transfer_direction direction, unsigned long flags)
2512 {
2513         unsigned int periods = buf_len / period_len;
2514         struct dma_async_tx_descriptor *txd;
2515         struct scatterlist *sg;
2516         int i;
2517
2518         sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2519         if (!sg)
2520                 return NULL;
2521
2522         for (i = 0; i < periods; i++) {
2523                 sg_dma_address(&sg[i]) = dma_addr;
2524                 sg_dma_len(&sg[i]) = period_len;
2525                 dma_addr += period_len;
2526         }
2527
2528         sg_chain(sg, periods + 1, sg);
2529
2530         txd = d40_prep_sg(chan, sg, sg, periods, direction,
2531                           DMA_PREP_INTERRUPT);
2532
2533         kfree(sg);
2534
2535         return txd;
2536 }
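
/*
 * Illustrative sketch: the cyclic prep above builds one scatterlist
 * entry per period and then chains the table back onto itself, so
 * sg_next() of the last period points at the first one again.  That
 * circular link is exactly what d40_prep_sg() tests to mark the
 * descriptor cyclic.
 */
static struct scatterlist *
d40_build_cyclic_sg_example(dma_addr_t buf, size_t period_len, int periods)
{
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(*sg), GFP_NOWAIT);
	if (!sg)
		return NULL;

	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = buf + i * period_len;
		sg_dma_len(&sg[i]) = period_len;
	}
	/* turn the spare entry into a chain link back to sg[0] */
	sg_chain(sg, periods + 1, sg);

	return sg;
}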
2537
2538 static enum dma_status d40_tx_status(struct dma_chan *chan,
2539                                      dma_cookie_t cookie,
2540                                      struct dma_tx_state *txstate)
2541 {
2542         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2543         enum dma_status ret;
2544
2545         if (d40c->phy_chan == NULL) {
2546                 chan_err(d40c, "Cannot read status of unallocated channel\n");
2547                 return -EINVAL;
2548         }
2549
2550         ret = dma_cookie_status(chan, cookie, txstate);
2551         if (ret != DMA_COMPLETE && txstate)
2552                 dma_set_residue(txstate, stedma40_residue(chan));
2553
2554         if (d40_is_paused(d40c))
2555                 ret = DMA_PAUSED;
2556
2557         return ret;
2558 }
2559
2560 static void d40_issue_pending(struct dma_chan *chan)
2561 {
2562         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2563         unsigned long flags;
2564
2565         if (d40c->phy_chan == NULL) {
2566                 chan_err(d40c, "Channel is not allocated!\n");
2567                 return;
2568         }
2569
2570         spin_lock_irqsave(&d40c->lock, flags);
2571
2572         list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2573
2574         /* Busy means that queued jobs are already being processed */
2575         if (!d40c->busy)
2576                 (void) d40_queue_start(d40c);
2577
2578         spin_unlock_irqrestore(&d40c->lock, flags);
2579 }
2580
2581 static int d40_terminate_all(struct dma_chan *chan)
2582 {
2583         unsigned long flags;
2584         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2585         int ret;
2586
2587         if (d40c->phy_chan == NULL) {
2588                 chan_err(d40c, "Channel is not allocated!\n");
2589                 return -EINVAL;
2590         }
2591
2592         spin_lock_irqsave(&d40c->lock, flags);
2593
2594         pm_runtime_get_sync(d40c->base->dev);
2595         ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2596         if (ret)
2597                 chan_err(d40c, "Failed to stop channel\n");
2598
2599         d40_term_all(d40c);
2600         pm_runtime_mark_last_busy(d40c->base->dev);
2601         pm_runtime_put_autosuspend(d40c->base->dev);
2602         if (d40c->busy) {
2603                 pm_runtime_mark_last_busy(d40c->base->dev);
2604                 pm_runtime_put_autosuspend(d40c->base->dev);
2605         }
2606         d40c->busy = false;
2607
2608         spin_unlock_irqrestore(&d40c->lock, flags);
2609         return 0;
2610 }
2611
2612 static int
2613 dma40_config_to_halfchannel(struct d40_chan *d40c,
2614                             struct stedma40_half_channel_info *info,
2615                             u32 maxburst)
2616 {
2617         int psize;
2618
2619         if (chan_is_logical(d40c)) {
2620                 if (maxburst >= 16)
2621                         psize = STEDMA40_PSIZE_LOG_16;
2622                 else if (maxburst >= 8)
2623                         psize = STEDMA40_PSIZE_LOG_8;
2624                 else if (maxburst >= 4)
2625                         psize = STEDMA40_PSIZE_LOG_4;
2626                 else
2627                         psize = STEDMA40_PSIZE_LOG_1;
2628         } else {
2629                 if (maxburst >= 16)
2630                         psize = STEDMA40_PSIZE_PHY_16;
2631                 else if (maxburst >= 8)
2632                         psize = STEDMA40_PSIZE_PHY_8;
2633                 else if (maxburst >= 4)
2634                         psize = STEDMA40_PSIZE_PHY_4;
2635                 else
2636                         psize = STEDMA40_PSIZE_PHY_1;
2637         }
2638
2639         info->psize = psize;
2640         info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2641
2642         return 0;
2643 }
2644
2645 static int d40_set_runtime_config(struct dma_chan *chan,
2646                                   struct dma_slave_config *config)
2647 {
2648         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2649
2650         memcpy(&d40c->slave_config, config, sizeof(*config));
2651
2652         return 0;
2653 }
2654
2655 /* Runtime reconfiguration extension */
2656 static int d40_set_runtime_config_write(struct dma_chan *chan,
2657                                   struct dma_slave_config *config,
2658                                   enum dma_transfer_direction direction)
2659 {
2660         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2661         struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2662         enum dma_slave_buswidth src_addr_width, dst_addr_width;
2663         dma_addr_t config_addr;
2664         u32 src_maxburst, dst_maxburst;
2665         int ret;
2666
2667         if (d40c->phy_chan == NULL) {
2668                 chan_err(d40c, "Channel is not allocated!\n");
2669                 return -EINVAL;
2670         }
2671
2672         src_addr_width = config->src_addr_width;
2673         src_maxburst = config->src_maxburst;
2674         dst_addr_width = config->dst_addr_width;
2675         dst_maxburst = config->dst_maxburst;
2676
2677         if (direction == DMA_DEV_TO_MEM) {
2678                 config_addr = config->src_addr;
2679
2680                 if (cfg->dir != DMA_DEV_TO_MEM)
2681                         dev_dbg(d40c->base->dev,
2682                                 "channel was not configured for peripheral "
2683                                 "to memory transfer (%d), overriding\n",
2684                                 cfg->dir);
2685                 cfg->dir = DMA_DEV_TO_MEM;
2686
2687                 /* Configure the memory side */
2688                 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2689                         dst_addr_width = src_addr_width;
2690                 if (dst_maxburst == 0)
2691                         dst_maxburst = src_maxburst;
2692
2693         } else if (direction == DMA_MEM_TO_DEV) {
2694                 config_addr = config->dst_addr;
2695
2696                 if (cfg->dir != DMA_MEM_TO_DEV)
2697                         dev_dbg(d40c->base->dev,
2698                                 "channel was not configured for memory "
2699                                 "to peripheral transfer (%d), overriding\n",
2700                                 cfg->dir);
2701                 cfg->dir = DMA_MEM_TO_DEV;
2702
2703                 /* Configure the memory side */
2704                 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2705                         src_addr_width = dst_addr_width;
2706                 if (src_maxburst == 0)
2707                         src_maxburst = dst_maxburst;
2708         } else {
2709                 dev_err(d40c->base->dev,
2710                         "unrecognized channel direction %d\n",
2711                         direction);
2712                 return -EINVAL;
2713         }
2714
2715         if (config_addr <= 0) {
2716                 dev_err(d40c->base->dev, "no address supplied\n");
2717                 return -EINVAL;
2718         }
2719
2720         if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2721                 dev_err(d40c->base->dev,
2722                         "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2723                         src_maxburst,
2724                         src_addr_width,
2725                         dst_maxburst,
2726                         dst_addr_width);
2727                 return -EINVAL;
2728         }
2729
2730         if (src_maxburst > 16) {
2731                 src_maxburst = 16;
2732                 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2733         } else if (dst_maxburst > 16) {
2734                 dst_maxburst = 16;
2735                 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2736         }
2737
2738         /* Only valid widths are: 1, 2, 4 and 8. */
2739         if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2740             src_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2741             dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2742             dst_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2743             !is_power_of_2(src_addr_width) ||
2744             !is_power_of_2(dst_addr_width))
2745                 return -EINVAL;
2746
2747         cfg->src_info.data_width = src_addr_width;
2748         cfg->dst_info.data_width = dst_addr_width;
2749
2750         ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2751                                           src_maxburst);
2752         if (ret)
2753                 return ret;
2754
2755         ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2756                                           dst_maxburst);
2757         if (ret)
2758                 return ret;
2759
2760         /* Fill in register values */
2761         if (chan_is_logical(d40c))
2762                 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2763         else
2764                 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2765
2766         /* These settings will take precedence later */
2767         d40c->runtime_addr = config_addr;
2768         d40c->runtime_direction = direction;
2769         dev_dbg(d40c->base->dev,
2770                 "configured channel %s for %s, data width %d/%d, "
2771                 "maxburst %d/%d elements, LE, no flow control\n",
2772                 dma_chan_name(chan),
2773                 (direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2774                 src_addr_width, dst_addr_width,
2775                 src_maxburst, dst_maxburst);
2776
2777         return 0;
2778 }
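
/*
 * Illustrative sketch of the burst balancing above: bursts are capped
 * at 16 beats and the opposite side is rescaled so both sides keep
 * moving the same number of bytes per burst.  For example a 32-beat,
 * 1-byte source paired with a 4-byte destination becomes 16 beats of
 * 1 byte against 4 beats of 4 bytes: 16 bytes per burst on each side.
 */
static void d40_clamp_burst_example(u32 *src_burst, u32 src_width,
				    u32 *dst_burst, u32 dst_width)
{
	if (*src_burst > 16) {
		*src_burst = 16;
		*dst_burst = *src_burst * src_width / dst_width;
	} else if (*dst_burst > 16) {
		*dst_burst = 16;
		*src_burst = *dst_burst * dst_width / src_width;
	}
}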
2779
2780 /* Initialization functions */
2781
2782 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2783                                  struct d40_chan *chans, int offset,
2784                                  int num_chans)
2785 {
2786         int i = 0;
2787         struct d40_chan *d40c;
2788
2789         INIT_LIST_HEAD(&dma->channels);
2790
2791         for (i = offset; i < offset + num_chans; i++) {
2792                 d40c = &chans[i];
2793                 d40c->base = base;
2794                 d40c->chan.device = dma;
2795
2796                 spin_lock_init(&d40c->lock);
2797
2798                 d40c->log_num = D40_PHY_CHAN;
2799
2800                 INIT_LIST_HEAD(&d40c->done);
2801                 INIT_LIST_HEAD(&d40c->active);
2802                 INIT_LIST_HEAD(&d40c->queue);
2803                 INIT_LIST_HEAD(&d40c->pending_queue);
2804                 INIT_LIST_HEAD(&d40c->client);
2805                 INIT_LIST_HEAD(&d40c->prepare_queue);
2806
2807                 tasklet_setup(&d40c->tasklet, dma_tasklet);
2808
2809                 list_add_tail(&d40c->chan.device_node,
2810                               &dma->channels);
2811         }
2812 }
2813
2814 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2815 {
2816         if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
2817                 dev->device_prep_slave_sg = d40_prep_slave_sg;
2818                 dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2819         }
2820
2821         if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2822                 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2823                 dev->directions = BIT(DMA_MEM_TO_MEM);
2824                 /*
2825                  * This controller can only access addresses at even
2826                  * 32-bit boundaries, i.e. 2^2.
2827                  */
2828                 dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
2829         }
2830
2831         if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2832                 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2833
2834         dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2835         dev->device_free_chan_resources = d40_free_chan_resources;
2836         dev->device_issue_pending = d40_issue_pending;
2837         dev->device_tx_status = d40_tx_status;
2838         dev->device_config = d40_set_runtime_config;
2839         dev->device_pause = d40_pause;
2840         dev->device_resume = d40_resume;
2841         dev->device_terminate_all = d40_terminate_all;
2842         dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2843         dev->dev = base->dev;
2844 }
2845
2846 static int __init d40_dmaengine_init(struct d40_base *base,
2847                                      int num_reserved_chans)
2848 {
2849         int err;
2850
2851         d40_chan_init(base, &base->dma_slave, base->log_chans,
2852                       0, base->num_log_chans);
2853
2854         dma_cap_zero(base->dma_slave.cap_mask);
2855         dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2856         dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2857
2858         d40_ops_init(base, &base->dma_slave);
2859
2860         err = dmaenginem_async_device_register(&base->dma_slave);
2861
2862         if (err) {
2863                 d40_err(base->dev, "Failed to register slave channels\n");
2864                 goto exit;
2865         }
2866
2867         d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2868                       base->num_log_chans, base->num_memcpy_chans);
2869
2870         dma_cap_zero(base->dma_memcpy.cap_mask);
2871         dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2872
2873         d40_ops_init(base, &base->dma_memcpy);
2874
2875         err = dmaenginem_async_device_register(&base->dma_memcpy);
2876
2877         if (err) {
2878                 d40_err(base->dev,
2879                         "Failed to register memcpy only channels\n");
2880                 goto exit;
2881         }
2882
2883         d40_chan_init(base, &base->dma_both, base->phy_chans,
2884                       0, num_reserved_chans);
2885
2886         dma_cap_zero(base->dma_both.cap_mask);
2887         dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2888         dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2889         dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2890
2891         d40_ops_init(base, &base->dma_both);
2892         err = dmaenginem_async_device_register(&base->dma_both);
2893
2894         if (err) {
2895                 d40_err(base->dev,
2896                         "Failed to register logical and physical capable channels\n");
2897                 goto exit;
2898         }
2899         return 0;
2900  exit:
2901         return err;
2902 }
2903
2904 /* Suspend/resume functionality */
2905 #ifdef CONFIG_PM_SLEEP
2906 static int dma40_suspend(struct device *dev)
2907 {
2908         struct d40_base *base = dev_get_drvdata(dev);
2909         int ret;
2910
2911         ret = pm_runtime_force_suspend(dev);
2912         if (ret)
2913                 return ret;
2914
2915         if (base->lcpa_regulator)
2916                 ret = regulator_disable(base->lcpa_regulator);
2917         return ret;
2918 }
2919
2920 static int dma40_resume(struct device *dev)
2921 {
2922         struct d40_base *base = dev_get_drvdata(dev);
2923         int ret = 0;
2924
2925         if (base->lcpa_regulator) {
2926                 ret = regulator_enable(base->lcpa_regulator);
2927                 if (ret)
2928                         return ret;
2929         }
2930
2931         return pm_runtime_force_resume(dev);
2932 }
2933 #endif
2934
2935 #ifdef CONFIG_PM
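     /*
      * Save (@save == true) or restore (@save == false) the registers
      * listed in @regaddr, using @backup as the storage area. Register
      * offsets are relative to @baseaddr.
      */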
2936 static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2937                          u32 *regaddr, int num, bool save)
2938 {
2939         int i;
2940
2941         for (i = 0; i < num; i++) {
2942                 void __iomem *addr = baseaddr + regaddr[i];
2943
2944                 if (save)
2945                         backup[i] = readl_relaxed(addr);
2946                 else
2947                         writel_relaxed(backup[i], addr);
2948         }
2949 }
2950
2951 static void d40_save_restore_registers(struct d40_base *base, bool save)
2952 {
2953         int i;
2954
2955         /* Save/Restore channel specific registers */
2956         for (i = 0; i < base->num_phy_chans; i++) {
2957                 void __iomem *addr;
2958                 int idx;
2959
2960                 if (base->phy_res[i].reserved)
2961                         continue;
2962
2963                 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2964                 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
2965
2966                 dma40_backup(addr, &base->reg_val_backup_chan[idx],
2967                              d40_backup_regs_chan,
2968                              ARRAY_SIZE(d40_backup_regs_chan),
2969                              save);
2970         }
2971
2972         /* Save/Restore global registers */
2973         dma40_backup(base->virtbase, base->reg_val_backup,
2974                      d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
2975                      save);
2976
2977         /* Save/Restore registers that only exist on DMA40 v3 and later */
2978         if (base->gen_dmac.backup)
2979                 dma40_backup(base->virtbase, base->reg_val_backup_v4,
2980                              base->gen_dmac.backup,
2981                              base->gen_dmac.backup_size,
2982                              save);
2983 }
2984
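     /*
      * On runtime suspend, save all channel and global registers, then
      * gate the global clocks via gcc_pwr_off_mask, which
      * d40_phy_res_init() built so that the event groups used by
      * reserved (secure) channels stay clocked.
      */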
2985 static int dma40_runtime_suspend(struct device *dev)
2986 {
2987         struct d40_base *base = dev_get_drvdata(dev);
2988
2989         d40_save_restore_registers(base, true);
2990
2991         /* Don't disable/enable clocks for v1 due to HW bugs */
2992         if (base->rev != 1)
2993                 writel_relaxed(base->gcc_pwr_off_mask,
2994                                base->virtbase + D40_DREG_GCC);
2995
2996         return 0;
2997 }
2998
2999 static int dma40_runtime_resume(struct device *dev)
3000 {
3001         struct d40_base *base = dev_get_drvdata(dev);
3002
3003         d40_save_restore_registers(base, false);
3004
3005         writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3006                        base->virtbase + D40_DREG_GCC);
3007         return 0;
3008 }
3009 #endif
3010
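     /*
      * The late system-sleep ops reuse the runtime PM callbacks:
      * dma40_suspend()/dma40_resume() wrap them via
      * pm_runtime_force_{suspend,resume}().
      */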
3011 static const struct dev_pm_ops dma40_pm_ops = {
3012         SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3013         SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3014                                 dma40_runtime_resume,
3015                                 NULL)
3016 };
3017
3018 /* Initialization functions. */
3019
3020 static int __init d40_phy_res_init(struct d40_base *base)
3021 {
3022         int i;
3023         int num_phy_chans_avail = 0;
3024         u32 val[2];
3025         int odd_even_bit = -2;
3026         int gcc = D40_DREG_GCC_ENA;
3027
3028         val[0] = readl(base->virtbase + D40_DREG_PRSME);
3029         val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3030
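             /*
              * PRSME/PRSMO hold one 2-bit security field per even/odd
              * physical channel; odd_even_bit advances by two on every
              * even channel so that (val[i % 2] >> odd_even_bit) & 3
              * picks out channel i's field. A field value of 1 marks a
              * security-only channel.
              */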
3031         for (i = 0; i < base->num_phy_chans; i++) {
3032                 base->phy_res[i].num = i;
3033                 odd_even_bit += 2 * ((i % 2) == 0);
3034                 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3035                         /* Mark security only channels as occupied */
3036                         base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3037                         base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3038                         base->phy_res[i].reserved = true;
3039                         gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3040                                                        D40_DREG_GCC_SRC);
3041                         gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3042                                                        D40_DREG_GCC_DST);
3043
3045                 } else {
3046                         base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3047                         base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3048                         base->phy_res[i].reserved = false;
3049                         num_phy_chans_avail++;
3050                 }
3051                 spin_lock_init(&base->phy_res[i].lock);
3052         }
3053
3054         /* Mark disabled channels as occupied */
3055         for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3056                 int chan = base->plat_data->disabled_channels[i];
3057
3058                 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3059                 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3060                 base->phy_res[chan].reserved = true;
3061                 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3062                                                D40_DREG_GCC_SRC);
3063                 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3064                                                D40_DREG_GCC_DST);
3065                 num_phy_chans_avail--;
3066         }
3067
3068         /* Mark soft_lli channels */
3069         for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3070                 int chan = base->plat_data->soft_lli_chans[i];
3071
3072                 base->phy_res[chan].use_soft_lli = true;
3073         }
3074
3075         dev_info(base->dev, "%d of %d physical DMA channels available\n",
3076                  num_phy_chans_avail, base->num_phy_chans);
3077
3078         /* Verify settings extended vs standard */
3079         val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3080
3081         for (i = 0; i < base->num_phy_chans; i++) {
3082
3083                 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3084                     (val[0] & 0x3) != 1)
3085                         dev_info(base->dev,
3086                                  "[%s] INFO: channel %d is misconfigured (%d)\n",
3087                                  __func__, i, val[0] & 0x3);
3088
3089                 val[0] = val[0] >> 2;
3090         }
3091
3092         /*
3093          * To keep things simple, enable all clocks initially.
3094          * The clocks will be managed later, after channel allocation.
3095          * The clocks for the event lines on which reserved channels exist
3096          * are not managed here.
3097          */
3098         writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3099         base->gcc_pwr_off_mask = gcc;
3100
3101         return num_phy_chans_avail;
3102 }
3103
3104 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3105 {
3106         struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3107         struct clk *clk;
3108         void __iomem *virtbase;
3109         struct resource *res;
3110         struct d40_base *base;
3111         int num_log_chans;
3112         int num_phy_chans;
3113         int num_memcpy_chans;
3114         int clk_ret = -EINVAL;
3115         int i;
3116         u32 pid;
3117         u32 cid;
3118         u8 rev;
3119
3120         clk = clk_get(&pdev->dev, NULL);
3121         if (IS_ERR(clk)) {
3122                 d40_err(&pdev->dev, "No matching clock found\n");
3123                 goto check_prepare_enabled;
3124         }
3125
3126         clk_ret = clk_prepare_enable(clk);
3127         if (clk_ret) {
3128                 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3129                 goto disable_unprepare;
3130         }
3131
3132         /* Get IO for DMAC base address */
3133         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3134         if (!res)
3135                 goto disable_unprepare;
3136
3137         if (request_mem_region(res->start, resource_size(res),
3138                                D40_NAME " I/O base") == NULL)
3139                 goto release_region;
3140
3141         virtbase = ioremap(res->start, resource_size(res));
3142         if (!virtbase)
3143                 goto release_region;
3144
3145         /* This is just a regular AMBA PrimeCell ID actually */
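             /*
              * The four PID and four CID bytes sit in the last 0x20 bytes
              * of the register window, one byte per 32-bit register, and
              * are assembled LSB first.
              */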
3146         for (pid = 0, i = 0; i < 4; i++)
3147                 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3148                         & 255) << (i * 8);
3149         for (cid = 0, i = 0; i < 4; i++)
3150                 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3151                         & 255) << (i * 8);
3152
3153         if (cid != AMBA_CID) {
3154                 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3155                 goto unmap_io;
3156         }
3157         if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3158                 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3159                         AMBA_MANF_BITS(pid),
3160                         AMBA_VENDOR_ST);
3161                 goto unmap_io;
3162         }
3163         /*
3164          * HW revision:
3165          * DB8500ed has revision 0
3166          * ? has revision 1
3167          * DB8500v1 has revision 2
3168          * DB8500v2 has revision 3
3169          * AP9540v1 has revision 4
3170          * DB8540v1 has revision 4
3171          */
3172         rev = AMBA_REV_BITS(pid);
3173         if (rev < 2) {
3174                 d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
3175                 goto unmap_io;
3176         }
3177
3178         /* The number of physical channels on this HW */
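             /*
              * Presumably the low three bits of ICFG encode the channel
              * count as (channels / 4) - 1, so 4 * bits + 4 recovers it.
              */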
3179         if (plat_data->num_of_phy_chans)
3180                 num_phy_chans = plat_data->num_of_phy_chans;
3181         else
3182                 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3183
3184         /* The number of channels used for memcpy */
3185         if (plat_data->num_of_memcpy_chans)
3186                 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3187         else
3188                 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3189
3190         num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3191
3192         dev_info(&pdev->dev,
3193                  "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3194                  rev, &res->start, num_phy_chans, num_log_chans);
3195
3196         base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3197                        (num_phy_chans + num_log_chans + num_memcpy_chans) *
3198                        sizeof(struct d40_chan), GFP_KERNEL);
3199
3200         if (base == NULL)
3201                 goto unmap_io;
3202
3203         base->rev = rev;
3204         base->clk = clk;
3205         base->num_memcpy_chans = num_memcpy_chans;
3206         base->num_phy_chans = num_phy_chans;
3207         base->num_log_chans = num_log_chans;
3208         base->phy_start = res->start;
3209         base->phy_size = resource_size(res);
3210         base->virtbase = virtbase;
3211         base->plat_data = plat_data;
3212         base->dev = &pdev->dev;
3213         base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3214         base->log_chans = &base->phy_chans[num_phy_chans];
3215
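             /*
              * Platforms with 14 physical channels use the v4b register
              * layout; all others use the v4a layout.
              */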
3216         if (base->plat_data->num_of_phy_chans == 14) {
3217                 base->gen_dmac.backup = d40_backup_regs_v4b;
3218                 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3219                 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3220                 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3221                 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3222                 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3223                 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3224                 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3225                 base->gen_dmac.il = il_v4b;
3226                 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3227                 base->gen_dmac.init_reg = dma_init_reg_v4b;
3228                 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3229         } else {
3230                 if (base->rev >= 3) {
3231                         base->gen_dmac.backup = d40_backup_regs_v4a;
3232                         base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3233                 }
3234                 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3235                 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3236                 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3237                 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3238                 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3239                 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3240                 base->gen_dmac.il = il_v4a;
3241                 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3242                 base->gen_dmac.init_reg = dma_init_reg_v4a;
3243                 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3244         }
3245
3246         base->phy_res = kcalloc(num_phy_chans,
3247                                 sizeof(*base->phy_res),
3248                                 GFP_KERNEL);
3249         if (!base->phy_res)
3250                 goto free_base;
3251
3252         base->lookup_phy_chans = kcalloc(num_phy_chans,
3253                                          sizeof(*base->lookup_phy_chans),
3254                                          GFP_KERNEL);
3255         if (!base->lookup_phy_chans)
3256                 goto free_phy_res;
3257
3258         base->lookup_log_chans = kcalloc(num_log_chans,
3259                                          sizeof(*base->lookup_log_chans),
3260                                          GFP_KERNEL);
3261         if (!base->lookup_log_chans)
3262                 goto free_phy_chans;
3263
3264         base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3265                                                   sizeof(d40_backup_regs_chan),
3266                                                   GFP_KERNEL);
3267         if (!base->reg_val_backup_chan)
3268                 goto free_log_chans;
3269
3270         base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3271                                             * D40_LCLA_LINK_PER_EVENT_GRP,
3272                                             sizeof(*base->lcla_pool.alloc_map),
3273                                             GFP_KERNEL);
3274         if (!base->lcla_pool.alloc_map)
3275                 goto free_backup_chan;
3276
3277         base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
3278                                              sizeof(*base->regs_interrupt),
3279                                              GFP_KERNEL);
3280         if (!base->regs_interrupt)
3281                 goto free_map;
3282
3283         base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3284                                             0, SLAB_HWCACHE_ALIGN,
3285                                             NULL);
3286         if (base->desc_slab == NULL)
3287                 goto free_regs;
3288
3290         return base;
3291  free_regs:
3292         kfree(base->regs_interrupt);
3293  free_map:
3294         kfree(base->lcla_pool.alloc_map);
3295  free_backup_chan:
3296         kfree(base->reg_val_backup_chan);
3297  free_log_chans:
3298         kfree(base->lookup_log_chans);
3299  free_phy_chans:
3300         kfree(base->lookup_phy_chans);
3301  free_phy_res:
3302         kfree(base->phy_res);
3303  free_base:
3304         kfree(base);
3305  unmap_io:
3306         iounmap(virtbase);
3307  release_region:
3308         release_mem_region(res->start, resource_size(res));
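      /*
       * Note the unusual unwind below: the disable_unprepare label sits
       * inside the if-statement so that jumping to check_prepare_enabled
       * only disables the clock when clk_prepare_enable() actually
       * succeeded.
       */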
3309  check_prepare_enabled:
3310         if (!clk_ret)
3311  disable_unprepare:
3312                 clk_disable_unprepare(clk);
3313         if (!IS_ERR(clk))
3314                 clk_put(clk);
3315         return NULL;
3316 }
3317
3318 static void __init d40_hw_init(struct d40_base *base)
3319 {
3321         int i;
3322         u32 prmseo[2] = {0, 0};
3323         u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3324         u32 pcmis = 0;
3325         u32 pcicr = 0;
3326         struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3327         u32 reg_size = base->gen_dmac.init_reg_size;
3328
3329         for (i = 0; i < reg_size; i++)
3330                 writel(dma_init_reg[i].val,
3331                        base->virtbase + dma_init_reg[i].reg);
3332
3333         /* Configure all our dma channels to default settings */
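             /*
              * Build the per-channel 2-bit fields MSB first: the loop
              * walks the channels in reverse (num_phy_chans - i - 1)
              * while shifting the accumulators left, alternating between
              * the even and odd halves of each register pair via i % 2.
              */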
3334         for (i = 0; i < base->num_phy_chans; i++) {
3335
3336                 activeo[i % 2] = activeo[i % 2] << 2;
3337
3338                 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3339                     == D40_ALLOC_PHY) {
3340                         activeo[i % 2] |= 3;
3341                         continue;
3342                 }
3343
3344                 /* Enable interrupt # */
3345                 pcmis = (pcmis << 1) | 1;
3346
3347                 /* Clear interrupt # */
3348                 pcicr = (pcicr << 1) | 1;
3349
3350                 /* Set channel to physical mode */
3351                 prmseo[i % 2] = prmseo[i % 2] << 2;
3352                 prmseo[i % 2] |= 1;
3353
3354         }
3355
3356         writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3357         writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3358         writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3359         writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3360
3361         /* Write which interrupt to enable */
3362         writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3363
3364         /* Write which interrupt to clear */
3365         writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3366
3367         /* These are __initdata and cannot be accessed after init */
3368         base->gen_dmac.init_reg = NULL;
3369         base->gen_dmac.init_reg_size = 0;
3370 }
3371
3372 static int __init d40_lcla_allocate(struct d40_base *base)
3373 {
3374         struct d40_lcla_pool *pool = &base->lcla_pool;
3375         unsigned long *page_list;
3376         int i, j;
3377         int ret;
3378
3379         /*
3380          * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
3381          * To fulfill this hardware requirement without wasting 256 KiB,
3382          * we allocate pages until we get an aligned one.
3383          */
3384         page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3385                                   sizeof(*page_list),
3386                                   GFP_KERNEL);
3387         if (!page_list)
3388                 return -ENOMEM;
3389
3390         /* Calculate the page order required for the pool */
3391         base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3392
3393         for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3394                 page_list[i] = __get_free_pages(GFP_KERNEL,
3395                                                 base->lcla_pool.pages);
3396                 if (!page_list[i]) {
3398                         d40_err(base->dev, "Failed to allocate %d pages.\n",
3399                                 base->lcla_pool.pages);
3400                         ret = -ENOMEM;
3401
3402                         for (j = 0; j < i; j++)
3403                                 free_pages(page_list[j], base->lcla_pool.pages);
3404                         goto free_page_list;
3405                 }
3406
3407                 if ((virt_to_phys((void *)page_list[i]) &
3408                      (LCLA_ALIGNMENT - 1)) == 0)
3409                         break;
3410         }
3411
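             /*
              * Free every unaligned attempt; if an aligned allocation was
              * found it remains in page_list[i].
              */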
3412         for (j = 0; j < i; j++)
3413                 free_pages(page_list[j], base->lcla_pool.pages);
3414
3415         if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3416                 base->lcla_pool.base = (void *)page_list[i];
3417         } else {
3418                 /*
3419                  * After many attempts with no success finding the correct
3420                  * alignment, fall back to allocating a big buffer.
3421                  */
3422                 dev_warn(base->dev,
3423                          "[%s] Failed to get %d pages @ 18 bit align.\n",
3424                          __func__, base->lcla_pool.pages);
3425                 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3426                                                          base->num_phy_chans +
3427                                                          LCLA_ALIGNMENT,
3428                                                          GFP_KERNEL);
3429                 if (!base->lcla_pool.base_unaligned) {
3430                         ret = -ENOMEM;
3431                         goto free_page_list;
3432                 }
3433
3434                 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3435                                                  LCLA_ALIGNMENT);
3436         }
3437
3438         pool->dma_addr = dma_map_single(base->dev, pool->base,
3439                                         SZ_1K * base->num_phy_chans,
3440                                         DMA_TO_DEVICE);
3441         if (dma_mapping_error(base->dev, pool->dma_addr)) {
3442                 pool->dma_addr = 0;
3443                 ret = -ENOMEM;
3444                 goto free_page_list;
3445         }
3446
3447         writel(virt_to_phys(base->lcla_pool.base),
3448                base->virtbase + D40_DREG_LCLA);
3449         ret = 0;
3450  free_page_list:
3451         kfree(page_list);
3452         return ret;
3453 }
3454
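     /*
      * Build platform data from the device tree. A trimmed, purely
      * illustrative node (the register addresses follow the style of the
      * DB8500 dtsi and are examples, not authoritative):
      *
      *	dma-controller@801c0000 {
      *		compatible = "stericsson,dma40";
      *		reg = <0x801c0000 0x1000>, <0x40010000 0x800>;
      *		reg-names = "base", "lcpa";
      *		dma-channels = <8>;
      *		memcpy-channels = <51 56 57 58 59 60>;
      *		disabled-channels = <12>;
      *	};
      */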
3455 static int __init d40_of_probe(struct platform_device *pdev,
3456                                struct device_node *np)
3457 {
3458         struct stedma40_platform_data *pdata;
3459         int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3460         const __be32 *list;
3461
3462         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3463         if (!pdata)
3464                 return -ENOMEM;
3465
3466         /* If absent, this value will be obtained from the hardware. */
3467         of_property_read_u32(np, "dma-channels", &num_phy);
3468         if (num_phy > 0)
3469                 pdata->num_of_phy_chans = num_phy;
3470
3471         list = of_get_property(np, "memcpy-channels", &num_memcpy);
3472         num_memcpy /= sizeof(*list);
3473
3474         if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3475                 d40_err(&pdev->dev,
3476                         "Invalid number of memcpy channels specified (%d)\n",
3477                         num_memcpy);
3478                 return -EINVAL;
3479         }
3480         pdata->num_of_memcpy_chans = num_memcpy;
3481
3482         of_property_read_u32_array(np, "memcpy-channels",
3483                                    dma40_memcpy_channels,
3484                                    num_memcpy);
3485
3486         list = of_get_property(np, "disabled-channels", &num_disabled);
3487         num_disabled /= sizeof(*list);
3488
3489         if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3490                 d40_err(&pdev->dev,
3491                         "Invalid number of disabled channels specified (%d)\n",
3492                         num_disabled);
3493                 return -EINVAL;
3494         }
3495
3496         of_property_read_u32_array(np, "disabled-channels",
3497                                    pdata->disabled_channels,
3498                                    num_disabled);
3499         pdata->disabled_channels[num_disabled] = -1;
3500
3501         pdev->dev.platform_data = pdata;
3502
3503         return 0;
3504 }
3505
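     /*
      * Probe order: detect the hardware and allocate the base structure,
      * set up the physical channel resources, map the LCPA (and the LCLA,
      * either in ESRAM or from allocated pages), request the interrupt,
      * enable runtime PM, and finally register the dmaengine devices and
      * the OF translation.
      */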
3506 static int __init d40_probe(struct platform_device *pdev)
3507 {
3508         struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3509         struct device_node *np = pdev->dev.of_node;
3510         int ret = -ENOENT;
3511         struct d40_base *base;
3512         struct resource *res;
3513         int num_reserved_chans;
3514         u32 val;
3515
3516         if (!plat_data) {
3517                 if (np) {
3518                         if (d40_of_probe(pdev, np)) {
3519                                 ret = -ENOMEM;
3520                                 goto report_failure;
3521                         }
3522                 } else {
3523                         d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3524                         goto report_failure;
3525                 }
3526         }
3527
3528         base = d40_hw_detect_init(pdev);
3529         if (!base)
3530                 goto report_failure;
3531
3532         num_reserved_chans = d40_phy_res_init(base);
3533
3534         platform_set_drvdata(pdev, base);
3535
3536         spin_lock_init(&base->interrupt_lock);
3537         spin_lock_init(&base->execmd_lock);
3538
3539         /* Get IO for logical channel parameter address */
3540         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3541         if (!res) {
3542                 ret = -ENOENT;
3543                 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3544                 goto destroy_cache;
3545         }
3546         base->lcpa_size = resource_size(res);
3547         base->phy_lcpa = res->start;
3548
3549         if (request_mem_region(res->start, resource_size(res),
3550                                D40_NAME " I/O lcpa") == NULL) {
3551                 ret = -EBUSY;
3552                 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3553                 goto destroy_cache;
3554         }
3555
3556         /* The LCPA resides in ESRAM; warn if the HW already points elsewhere. */
3557         val = readl(base->virtbase + D40_DREG_LCPA);
3558         if (res->start != val && val != 0) {
3559                 dev_warn(&pdev->dev,
3560                          "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3561                          __func__, val, &res->start);
3562         } else
3563                 writel(res->start, base->virtbase + D40_DREG_LCPA);
3564
3565         base->lcpa_base = ioremap(res->start, resource_size(res));
3566         if (!base->lcpa_base) {
3567                 ret = -ENOMEM;
3568                 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3569                 goto destroy_cache;
3570         }
3571         /* If the LCLA is located in ESRAM we don't need to allocate it */
3572         if (base->plat_data->use_esram_lcla) {
3573                 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3574                                                         "lcla_esram");
3575                 if (!res) {
3576                         ret = -ENOENT;
3577                         d40_err(&pdev->dev,
3578                                 "No \"lcla_esram\" memory resource\n");
3579                         goto destroy_cache;
3580                 }
3581                 base->lcla_pool.base = ioremap(res->start,
3582                                                 resource_size(res));
3583                 if (!base->lcla_pool.base) {
3584                         ret = -ENOMEM;
3585                         d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3586                         goto destroy_cache;
3587                 }
3588                 writel(res->start, base->virtbase + D40_DREG_LCLA);
3589
3590         } else {
3591                 ret = d40_lcla_allocate(base);
3592                 if (ret) {
3593                         d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3594                         goto destroy_cache;
3595                 }
3596         }
3597
3598         spin_lock_init(&base->lcla_pool.lock);
3599
3600         base->irq = platform_get_irq(pdev, 0);
             if (base->irq < 0) {
                     d40_err(&pdev->dev, "No IRQ defined\n");
                     ret = base->irq;
                     goto destroy_cache;
             }
3601
3602         ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3603         if (ret) {
3604                 d40_err(&pdev->dev, "Failed to request IRQ\n");
3605                 goto destroy_cache;
3606         }
3607
3608         if (base->plat_data->use_esram_lcla) {
3609
3610                 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3611                 if (IS_ERR(base->lcpa_regulator)) {
3612                         d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3613                         ret = PTR_ERR(base->lcpa_regulator);
3614                         base->lcpa_regulator = NULL;
3615                         goto destroy_cache;
3616                 }
3617
3618                 ret = regulator_enable(base->lcpa_regulator);
3619                 if (ret) {
3620                         d40_err(&pdev->dev,
3621                                 "Failed to enable lcpa_regulator\n");
3622                         regulator_put(base->lcpa_regulator);
3623                         base->lcpa_regulator = NULL;
3624                         goto destroy_cache;
3625                 }
3626         }
3627
3628         writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3629
3630         pm_runtime_irq_safe(base->dev);
3631         pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3632         pm_runtime_use_autosuspend(base->dev);
3633         pm_runtime_mark_last_busy(base->dev);
3634         pm_runtime_set_active(base->dev);
3635         pm_runtime_enable(base->dev);
3636
3637         ret = d40_dmaengine_init(base, num_reserved_chans);
3638         if (ret)
3639                 goto destroy_cache;
3640
3641         ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3642         if (ret) {
3643                 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3644                 goto destroy_cache;
3645         }
3646
3647         d40_hw_init(base);
3648
3649         if (np) {
3650                 ret = of_dma_controller_register(np, d40_xlate, NULL);
3651                 if (ret)
3652                         dev_err(&pdev->dev,
3653                                 "could not register of_dma_controller\n");
3654         }
3655
3656         dev_info(base->dev, "initialized\n");
3657         return 0;
3658  destroy_cache:
3659         kmem_cache_destroy(base->desc_slab);
3660         if (base->virtbase)
3661                 iounmap(base->virtbase);
3662
3663         if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3664                 iounmap(base->lcla_pool.base);
3665                 base->lcla_pool.base = NULL;
3666         }
3667
3668         if (base->lcla_pool.dma_addr)
3669                 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3670                                  SZ_1K * base->num_phy_chans,
3671                                  DMA_TO_DEVICE);
3672
3673         if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3674                 free_pages((unsigned long)base->lcla_pool.base,
3675                            base->lcla_pool.pages);
3676
3677         kfree(base->lcla_pool.base_unaligned);
3678
3679         if (base->phy_lcpa)
3680                 release_mem_region(base->phy_lcpa,
3681                                    base->lcpa_size);
3682         if (base->phy_start)
3683                 release_mem_region(base->phy_start,
3684                                    base->phy_size);
3685         if (base->clk) {
3686                 clk_disable_unprepare(base->clk);
3687                 clk_put(base->clk);
3688         }
3689
3690         if (base->lcpa_regulator) {
3691                 regulator_disable(base->lcpa_regulator);
3692                 regulator_put(base->lcpa_regulator);
3693         }
3694
3695         kfree(base->lcla_pool.alloc_map);
3696         kfree(base->lookup_log_chans);
3697         kfree(base->lookup_phy_chans);
3698         kfree(base->phy_res);
3699         kfree(base);
3700  report_failure:
3701         d40_err(&pdev->dev, "probe failed\n");
3702         return ret;
3703 }
3704
3705 static const struct of_device_id d40_match[] = {
3706         { .compatible = "stericsson,dma40", },
3707         {}
3708 };
3709
3710 static struct platform_driver d40_driver = {
3711         .driver = {
3712                 .name  = D40_NAME,
3713                 .pm = &dma40_pm_ops,
3714                 .of_match_table = d40_match,
3715         },
3716 };
3717
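     /*
      * Registered at subsys_initcall() time rather than module_init() so
      * that the DMA controller is available before client drivers that
      * depend on it probe.
      */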
3718 static int __init stedma40_init(void)
3719 {
3720         return platform_driver_probe(&d40_driver, d40_probe);
3721 }
3722 subsys_initcall(stedma40_init);