drivers/dma/qcom/bam_dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */
/*
 * QCOM BAM DMA engine driver
 *
 * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
 * peripherals on the MSM 8x74.  The configuration of the channels is
 * dependent on the way they are hard wired to that specific peripheral.
 * The peripheral device tree entries specify the configuration of each
 * channel.
 *
 * The DMA controller requires the use of external memory for storage of the
 * hardware descriptors for each channel.  The descriptor FIFO is accessed as a
 * circular buffer and operations are managed according to the offset within the
 * FIFO.  After pipe/channel reset, all of the pipe registers and internal state
 * are back to defaults.
 *
 * During DMA operations, we write descriptors to the FIFO, being careful to
 * handle wrapping, and then write the last FIFO offset to that channel's
 * P_EVNT_REG register to kick off the transaction.  The P_SW_OFSTS register
 * indicates the current FIFO offset that is being processed, so there is some
 * indication of where the hardware is currently working.
 */
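
/*
 * Illustrative sketch (not driver code): the kick-off described above
 * amounts to writing hardware descriptors at the FIFO tail and then
 * publishing the new tail offset, in bytes, through P_EVNT_REG:
 *
 *      fifo[tail] = (struct bam_desc_hw){ .addr = ..., .size = ..., };
 *      tail = (tail + 1) % MAX_DESCRIPTORS;
 *      writel_relaxed(tail * sizeof(struct bam_desc_hw), P_EVNT_REG);
 *
 * bam_start_dma() below implements this, including wrap handling and
 * batching of multiple descriptors per kick.
 */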

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

struct bam_desc_hw {
        __le32 addr;            /* Buffer physical address */
        __le16 size;            /* Buffer size in bytes */
        __le16 flags;
};

#define BAM_DMA_AUTOSUSPEND_DELAY 100

#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)

struct bam_async_desc {
        struct virt_dma_desc vd;

        u32 num_desc;
        u32 xfer_len;

        /* transaction flags, EOT|EOB|NWD */
        u16 flags;

        struct bam_desc_hw *curr_desc;

        /* list node for the desc in the bam_chan list of descriptors */
        struct list_head desc_node;
        enum dma_transfer_direction dir;
        size_t length;
        struct bam_desc_hw desc[];
};

enum bam_reg {
        BAM_CTRL,
        BAM_REVISION,
        BAM_NUM_PIPES,
        BAM_DESC_CNT_TRSHLD,
        BAM_IRQ_SRCS,
        BAM_IRQ_SRCS_MSK,
        BAM_IRQ_SRCS_UNMASKED,
        BAM_IRQ_STTS,
        BAM_IRQ_CLR,
        BAM_IRQ_EN,
        BAM_CNFG_BITS,
        BAM_IRQ_SRCS_EE,
        BAM_IRQ_SRCS_MSK_EE,
        BAM_P_CTRL,
        BAM_P_RST,
        BAM_P_HALT,
        BAM_P_IRQ_STTS,
        BAM_P_IRQ_CLR,
        BAM_P_IRQ_EN,
        BAM_P_EVNT_DEST_ADDR,
        BAM_P_EVNT_REG,
        BAM_P_SW_OFSTS,
        BAM_P_DATA_FIFO_ADDR,
        BAM_P_DESC_FIFO_ADDR,
        BAM_P_EVNT_GEN_TRSHLD,
        BAM_P_FIFO_SIZES,
};

struct reg_offset_data {
        u32 base_offset;
        unsigned int pipe_mult, evnt_mult, ee_mult;
};

static const struct reg_offset_data bam_v1_3_reg_info[] = {
        [BAM_CTRL]              = { 0x0F80, 0x00, 0x00, 0x00 },
        [BAM_REVISION]          = { 0x0F84, 0x00, 0x00, 0x00 },
        [BAM_NUM_PIPES]         = { 0x0FBC, 0x00, 0x00, 0x00 },
        [BAM_DESC_CNT_TRSHLD]   = { 0x0F88, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS]          = { 0x0F8C, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS_MSK]      = { 0x0F90, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 },
        [BAM_IRQ_STTS]          = { 0x0F94, 0x00, 0x00, 0x00 },
        [BAM_IRQ_CLR]           = { 0x0F98, 0x00, 0x00, 0x00 },
        [BAM_IRQ_EN]            = { 0x0F9C, 0x00, 0x00, 0x00 },
        [BAM_CNFG_BITS]         = { 0x0FFC, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS_EE]       = { 0x1800, 0x00, 0x00, 0x80 },
        [BAM_IRQ_SRCS_MSK_EE]   = { 0x1804, 0x00, 0x00, 0x80 },
        [BAM_P_CTRL]            = { 0x0000, 0x80, 0x00, 0x00 },
        [BAM_P_RST]             = { 0x0004, 0x80, 0x00, 0x00 },
        [BAM_P_HALT]            = { 0x0008, 0x80, 0x00, 0x00 },
        [BAM_P_IRQ_STTS]        = { 0x0010, 0x80, 0x00, 0x00 },
        [BAM_P_IRQ_CLR]         = { 0x0014, 0x80, 0x00, 0x00 },
        [BAM_P_IRQ_EN]          = { 0x0018, 0x80, 0x00, 0x00 },
        [BAM_P_EVNT_DEST_ADDR]  = { 0x102C, 0x00, 0x40, 0x00 },
        [BAM_P_EVNT_REG]        = { 0x1018, 0x00, 0x40, 0x00 },
        [BAM_P_SW_OFSTS]        = { 0x1000, 0x00, 0x40, 0x00 },
        [BAM_P_DATA_FIFO_ADDR]  = { 0x1024, 0x00, 0x40, 0x00 },
        [BAM_P_DESC_FIFO_ADDR]  = { 0x101C, 0x00, 0x40, 0x00 },
        [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 },
        [BAM_P_FIFO_SIZES]      = { 0x1020, 0x00, 0x40, 0x00 },
};

static const struct reg_offset_data bam_v1_4_reg_info[] = {
        [BAM_CTRL]              = { 0x0000, 0x00, 0x00, 0x00 },
        [BAM_REVISION]          = { 0x0004, 0x00, 0x00, 0x00 },
        [BAM_NUM_PIPES]         = { 0x003C, 0x00, 0x00, 0x00 },
        [BAM_DESC_CNT_TRSHLD]   = { 0x0008, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS]          = { 0x000C, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS_MSK]      = { 0x0010, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 },
        [BAM_IRQ_STTS]          = { 0x0014, 0x00, 0x00, 0x00 },
        [BAM_IRQ_CLR]           = { 0x0018, 0x00, 0x00, 0x00 },
        [BAM_IRQ_EN]            = { 0x001C, 0x00, 0x00, 0x00 },
        [BAM_CNFG_BITS]         = { 0x007C, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS_EE]       = { 0x0800, 0x00, 0x00, 0x80 },
        [BAM_IRQ_SRCS_MSK_EE]   = { 0x0804, 0x00, 0x00, 0x80 },
        [BAM_P_CTRL]            = { 0x1000, 0x1000, 0x00, 0x00 },
        [BAM_P_RST]             = { 0x1004, 0x1000, 0x00, 0x00 },
        [BAM_P_HALT]            = { 0x1008, 0x1000, 0x00, 0x00 },
        [BAM_P_IRQ_STTS]        = { 0x1010, 0x1000, 0x00, 0x00 },
        [BAM_P_IRQ_CLR]         = { 0x1014, 0x1000, 0x00, 0x00 },
        [BAM_P_IRQ_EN]          = { 0x1018, 0x1000, 0x00, 0x00 },
        [BAM_P_EVNT_DEST_ADDR]  = { 0x182C, 0x00, 0x1000, 0x00 },
        [BAM_P_EVNT_REG]        = { 0x1818, 0x00, 0x1000, 0x00 },
        [BAM_P_SW_OFSTS]        = { 0x1800, 0x00, 0x1000, 0x00 },
        [BAM_P_DATA_FIFO_ADDR]  = { 0x1824, 0x00, 0x1000, 0x00 },
        [BAM_P_DESC_FIFO_ADDR]  = { 0x181C, 0x00, 0x1000, 0x00 },
        [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
        [BAM_P_FIFO_SIZES]      = { 0x1820, 0x00, 0x1000, 0x00 },
};

static const struct reg_offset_data bam_v1_7_reg_info[] = {
        [BAM_CTRL]              = { 0x00000, 0x00, 0x00, 0x00 },
        [BAM_REVISION]          = { 0x01000, 0x00, 0x00, 0x00 },
        [BAM_NUM_PIPES]         = { 0x01008, 0x00, 0x00, 0x00 },
        [BAM_DESC_CNT_TRSHLD]   = { 0x00008, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS]          = { 0x03010, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS_MSK]      = { 0x03014, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 },
        [BAM_IRQ_STTS]          = { 0x00014, 0x00, 0x00, 0x00 },
        [BAM_IRQ_CLR]           = { 0x00018, 0x00, 0x00, 0x00 },
        [BAM_IRQ_EN]            = { 0x0001C, 0x00, 0x00, 0x00 },
        [BAM_CNFG_BITS]         = { 0x0007C, 0x00, 0x00, 0x00 },
        [BAM_IRQ_SRCS_EE]       = { 0x03000, 0x00, 0x00, 0x1000 },
        [BAM_IRQ_SRCS_MSK_EE]   = { 0x03004, 0x00, 0x00, 0x1000 },
        [BAM_P_CTRL]            = { 0x13000, 0x1000, 0x00, 0x00 },
        [BAM_P_RST]             = { 0x13004, 0x1000, 0x00, 0x00 },
        [BAM_P_HALT]            = { 0x13008, 0x1000, 0x00, 0x00 },
        [BAM_P_IRQ_STTS]        = { 0x13010, 0x1000, 0x00, 0x00 },
        [BAM_P_IRQ_CLR]         = { 0x13014, 0x1000, 0x00, 0x00 },
        [BAM_P_IRQ_EN]          = { 0x13018, 0x1000, 0x00, 0x00 },
        [BAM_P_EVNT_DEST_ADDR]  = { 0x1382C, 0x00, 0x1000, 0x00 },
        [BAM_P_EVNT_REG]        = { 0x13818, 0x00, 0x1000, 0x00 },
        [BAM_P_SW_OFSTS]        = { 0x13800, 0x00, 0x1000, 0x00 },
        [BAM_P_DATA_FIFO_ADDR]  = { 0x13824, 0x00, 0x1000, 0x00 },
        [BAM_P_DESC_FIFO_ADDR]  = { 0x1381C, 0x00, 0x1000, 0x00 },
        [BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 },
        [BAM_P_FIFO_SIZES]      = { 0x13820, 0x00, 0x1000, 0x00 },
};

/* BAM CTRL */
#define BAM_SW_RST                      BIT(0)
#define BAM_EN                          BIT(1)
#define BAM_EN_ACCUM                    BIT(4)
#define BAM_TESTBUS_SEL_SHIFT           5
#define BAM_TESTBUS_SEL_MASK            0x3F
#define BAM_DESC_CACHE_SEL_SHIFT        13
#define BAM_DESC_CACHE_SEL_MASK         0x3
#define BAM_CACHED_DESC_STORE           BIT(15)
#define IBC_DISABLE                     BIT(16)

/* BAM REVISION */
#define REVISION_SHIFT          0
#define REVISION_MASK           0xFF
#define NUM_EES_SHIFT           8
#define NUM_EES_MASK            0xF
#define CE_BUFFER_SIZE          BIT(13)
#define AXI_ACTIVE              BIT(14)
#define USE_VMIDMT              BIT(15)
#define SECURED                 BIT(16)
#define BAM_HAS_NO_BYPASS       BIT(17)
#define HIGH_FREQUENCY_BAM      BIT(18)
#define INACTIV_TMRS_EXST       BIT(19)
#define NUM_INACTIV_TMRS        BIT(20)
#define DESC_CACHE_DEPTH_SHIFT  21
#define DESC_CACHE_DEPTH_1      (0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2      (1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3      (2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4      (3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN             BIT(23)
#define INACTIV_TMR_BASE_SHIFT  24
#define INACTIV_TMR_BASE_MASK   0xFF

/* BAM NUM PIPES */
#define BAM_NUM_PIPES_SHIFT             0
#define BAM_NUM_PIPES_MASK              0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT       16
#define PERIPH_NON_PIP_GRP_MASK         0xFF
#define BAM_NON_PIPE_GRP_SHIFT          24
#define BAM_NON_PIPE_GRP_MASK           0xFF

/* BAM CNFG BITS */
#define BAM_PIPE_CNFG           BIT(2)
#define BAM_FULL_PIPE           BIT(11)
#define BAM_NO_EXT_P_RST        BIT(12)
#define BAM_IBC_DISABLE         BIT(13)
#define BAM_SB_CLK_REQ          BIT(14)
#define BAM_PSM_CSW_REQ         BIT(15)
#define BAM_PSM_P_RES           BIT(16)
#define BAM_AU_P_RES            BIT(17)
#define BAM_SI_P_RES            BIT(18)
#define BAM_WB_P_RES            BIT(19)
#define BAM_WB_BLK_CSW          BIT(20)
#define BAM_WB_CSW_ACK_IDL      BIT(21)
#define BAM_WB_RETR_SVPNT       BIT(22)
#define BAM_WB_DSC_AVL_P_RST    BIT(23)
#define BAM_REG_P_EN            BIT(24)
#define BAM_PSM_P_HD_DATA       BIT(25)
#define BAM_AU_ACCUMED          BIT(26)
#define BAM_CMD_ENABLE          BIT(27)

#define BAM_CNFG_BITS_DEFAULT   (BAM_PIPE_CNFG |        \
                                 BAM_NO_EXT_P_RST |     \
                                 BAM_IBC_DISABLE |      \
                                 BAM_SB_CLK_REQ |       \
                                 BAM_PSM_CSW_REQ |      \
                                 BAM_PSM_P_RES |        \
                                 BAM_AU_P_RES |         \
                                 BAM_SI_P_RES |         \
                                 BAM_WB_P_RES |         \
                                 BAM_WB_BLK_CSW |       \
                                 BAM_WB_CSW_ACK_IDL |   \
                                 BAM_WB_RETR_SVPNT |    \
                                 BAM_WB_DSC_AVL_P_RST | \
                                 BAM_REG_P_EN |         \
                                 BAM_PSM_P_HD_DATA |    \
                                 BAM_AU_ACCUMED |       \
                                 BAM_CMD_ENABLE)

/* PIPE CTRL */
#define P_EN                    BIT(1)
#define P_DIRECTION             BIT(3)
#define P_SYS_STRM              BIT(4)
#define P_SYS_MODE              BIT(5)
#define P_AUTO_EOB              BIT(6)
#define P_AUTO_EOB_SEL_SHIFT    7
#define P_AUTO_EOB_SEL_512      (0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256      (1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128      (2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64       (3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT  9
#define P_PREFETCH_LIMIT_32     (0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16     (1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4      (2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD             BIT(11)
#define P_LOCK_GROUP_SHIFT      16
#define P_LOCK_GROUP_MASK       0x1F

/* BAM_DESC_CNT_TRSHLD */
#define CNT_TRSHLD              0xffff
#define DEFAULT_CNT_THRSHLD     0x4

/* BAM_IRQ_SRCS */
#define BAM_IRQ                 BIT(31)
#define P_IRQ                   0x7fffffff

/* BAM_IRQ_SRCS_MSK */
#define BAM_IRQ_MSK             BAM_IRQ
#define P_IRQ_MSK               P_IRQ

/* BAM_IRQ_STTS */
#define BAM_TIMER_IRQ           BIT(4)
#define BAM_EMPTY_IRQ           BIT(3)
#define BAM_ERROR_IRQ           BIT(2)
#define BAM_HRESP_ERR_IRQ       BIT(1)

/* BAM_IRQ_CLR */
#define BAM_TIMER_CLR           BIT(4)
#define BAM_EMPTY_CLR           BIT(3)
#define BAM_ERROR_CLR           BIT(2)
#define BAM_HRESP_ERR_CLR       BIT(1)

/* BAM_IRQ_EN */
#define BAM_TIMER_EN            BIT(4)
#define BAM_EMPTY_EN            BIT(3)
#define BAM_ERROR_EN            BIT(2)
#define BAM_HRESP_ERR_EN        BIT(1)

/* BAM_P_IRQ_EN */
#define P_PRCSD_DESC_EN         BIT(0)
#define P_TIMER_EN              BIT(1)
#define P_WAKE_EN               BIT(2)
#define P_OUT_OF_DESC_EN        BIT(3)
#define P_ERR_EN                BIT(4)
#define P_TRNSFR_END_EN         BIT(5)
#define P_DEFAULT_IRQS_EN       (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)

/* BAM_P_SW_OFSTS */
#define P_SW_OFSTS_MASK         0xffff

#define BAM_DESC_FIFO_SIZE      SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_FIFO_SIZE   (SZ_32K - 8)
#define IS_BUSY(chan)   (CIRC_SPACE(bchan->tail, bchan->head,\
                         MAX_DESCRIPTORS + 1) == 0)
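
/*
 * Worked example of the accounting above (illustrative): descriptors are
 * 8 bytes, so a 32 KiB FIFO gives MAX_DESCRIPTORS = 32768 / 8 - 1 = 4095.
 * The circ_buf helpers are called with a ring size of MAX_DESCRIPTORS + 1 =
 * 4096, a power of two as they require; one slot is kept unused so that
 * head == tail unambiguously means "empty".  IS_BUSY() is then simply
 * "no free slot remains between tail and head".
 */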

struct bam_chan {
        struct virt_dma_chan vc;

        struct bam_device *bdev;

        /* configuration from device tree */
        u32 id;

        /* runtime configuration */
        struct dma_slave_config slave;

        /* fifo storage */
        struct bam_desc_hw *fifo_virt;
        dma_addr_t fifo_phys;

        /* fifo markers */
        unsigned short head;            /* start of active descriptor entries */
        unsigned short tail;            /* end of active descriptor entries */

        unsigned int initialized;       /* is the channel hw initialized? */
        unsigned int paused;            /* is the channel paused? */
        unsigned int reconfigure;       /* new slave config? */
        /* list of descriptors currently processed */
        struct list_head desc_list;

        struct list_head node;
};

static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
{
        return container_of(common, struct bam_chan, vc.chan);
}

struct bam_device {
        void __iomem *regs;
        struct device *dev;
        struct dma_device common;
        struct bam_chan *channels;
        u32 num_channels;
        u32 num_ees;

        /* execution environment ID, from DT */
        u32 ee;
        bool controlled_remotely;

        const struct reg_offset_data *layout;

        struct clk *bamclk;
        int irq;

        /* dma start transaction tasklet */
        struct tasklet_struct task;
};

/**
 * bam_addr - returns BAM register address
 * @bdev: bam device
 * @pipe: pipe instance (ignored when register doesn't have multiple instances)
 * @reg:  register enum
 */
static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
                enum bam_reg reg)
{
        const struct reg_offset_data r = bdev->layout[reg];

        return bdev->regs + r.base_offset +
                r.pipe_mult * pipe +
                r.evnt_mult * pipe +
                r.ee_mult * bdev->ee;
}
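
/*
 * Example (illustrative): with the bam_v1_4_reg_info layout above,
 * BAM_P_CTRL has base_offset 0x1000 and pipe_mult 0x1000, so the control
 * register of pipe 3 resolves to regs + 0x1000 + 3 * 0x1000 = regs + 0x4000.
 * Per-EE registers scale via ee_mult instead, e.g. BAM_IRQ_SRCS_EE for
 * ee = 1 is regs + 0x0800 + 1 * 0x80 = regs + 0x0880.
 */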

/**
 * bam_reset_channel - Reset individual BAM DMA channel
 * @bchan: bam channel
 *
 * This function resets a specific BAM channel
 */
static void bam_reset_channel(struct bam_chan *bchan)
{
        struct bam_device *bdev = bchan->bdev;

        lockdep_assert_held(&bchan->vc.lock);

        /* reset channel */
        writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
        writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));

        /* don't allow cpu to reorder BAM register accesses done after this */
        wmb();

        /* make sure hw is initialized when channel is used the first time */
        bchan->initialized = 0;
}

/**
 * bam_chan_init_hw - Initialize channel hardware
 * @bchan: bam channel
 * @dir: DMA transfer direction
 *
 * This function resets and initializes the BAM channel
 */
static void bam_chan_init_hw(struct bam_chan *bchan,
        enum dma_transfer_direction dir)
{
        struct bam_device *bdev = bchan->bdev;
        u32 val;

        /* Reset the channel to clear internal state of the FIFO */
        bam_reset_channel(bchan);

        /*
         * write out 8 byte aligned address.  We have enough space for this
         * because we allocated 1 more descriptor (8 bytes) than we can use
         */
        writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
                        bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
        writel_relaxed(BAM_FIFO_SIZE,
                        bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

        /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
        writel_relaxed(P_DEFAULT_IRQS_EN,
                        bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

        /* unmask the specific pipe and EE combo */
        val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
        val |= BIT(bchan->id);
        writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

        /* don't allow cpu to reorder the channel enable done below */
        wmb();

        /* set fixed direction and mode, then enable channel */
        val = P_EN | P_SYS_MODE;
        if (dir == DMA_DEV_TO_MEM)
                val |= P_DIRECTION;

        writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));

        bchan->initialized = 1;

        /* init FIFO pointers */
        bchan->head = 0;
        bchan->tail = 0;
}

/**
 * bam_alloc_chan - Allocate channel resources for DMA channel.
 * @chan: specified channel
 *
 * This function allocates the FIFO descriptor memory
 */
static int bam_alloc_chan(struct dma_chan *chan)
{
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;

        if (bchan->fifo_virt)
                return 0;

        /* allocate FIFO descriptor space, but only if necessary */
        bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
                                        &bchan->fifo_phys, GFP_KERNEL);

        if (!bchan->fifo_virt) {
                dev_err(bdev->dev, "Failed to allocate desc fifo\n");
                return -ENOMEM;
        }

        return 0;
}
518 static int bam_pm_runtime_get_sync(struct device *dev)
519 {
520         if (pm_runtime_enabled(dev))
521                 return pm_runtime_get_sync(dev);
522
523         return 0;
524 }
525
526 /**
527  * bam_free_chan - Frees dma resources associated with specific channel
528  * @chan: specified channel
529  *
530  * Free the allocated fifo descriptor memory and channel resources
531  *
532  */
533 static void bam_free_chan(struct dma_chan *chan)
534 {
535         struct bam_chan *bchan = to_bam_chan(chan);
536         struct bam_device *bdev = bchan->bdev;
537         u32 val;
538         unsigned long flags;
539         int ret;
540
541         ret = bam_pm_runtime_get_sync(bdev->dev);
542         if (ret < 0)
543                 return;
544
545         vchan_free_chan_resources(to_virt_chan(chan));
546
547         if (!list_empty(&bchan->desc_list)) {
548                 dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
549                 goto err;
550         }
551
552         spin_lock_irqsave(&bchan->vc.lock, flags);
553         bam_reset_channel(bchan);
554         spin_unlock_irqrestore(&bchan->vc.lock, flags);
555
556         dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
557                     bchan->fifo_phys);
558         bchan->fifo_virt = NULL;
559
560         /* mask irq for pipe/channel */
561         val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
562         val &= ~BIT(bchan->id);
563         writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
564
565         /* disable irq */
566         writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
567
568 err:
569         pm_runtime_mark_last_busy(bdev->dev);
570         pm_runtime_put_autosuspend(bdev->dev);
571 }
572
573 /**
574  * bam_slave_config - set slave configuration for channel
575  * @chan: dma channel
576  * @cfg: slave configuration
577  *
578  * Sets slave configuration for channel
579  *
580  */
581 static int bam_slave_config(struct dma_chan *chan,
582                             struct dma_slave_config *cfg)
583 {
584         struct bam_chan *bchan = to_bam_chan(chan);
585         unsigned long flag;
586
587         spin_lock_irqsave(&bchan->vc.lock, flag);
588         memcpy(&bchan->slave, cfg, sizeof(*cfg));
589         bchan->reconfigure = 1;
590         spin_unlock_irqrestore(&bchan->vc.lock, flag);
591
592         return 0;
593 }
594
595 /**
596  * bam_prep_slave_sg - Prep slave sg transaction
597  *
598  * @chan: dma channel
599  * @sgl: scatter gather list
600  * @sg_len: length of sg
601  * @direction: DMA transfer direction
602  * @flags: DMA flags
603  * @context: transfer context (unused)
604  */
605 static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
606         struct scatterlist *sgl, unsigned int sg_len,
607         enum dma_transfer_direction direction, unsigned long flags,
608         void *context)
609 {
610         struct bam_chan *bchan = to_bam_chan(chan);
611         struct bam_device *bdev = bchan->bdev;
612         struct bam_async_desc *async_desc;
613         struct scatterlist *sg;
614         u32 i;
615         struct bam_desc_hw *desc;
616         unsigned int num_alloc = 0;
617
618
619         if (!is_slave_direction(direction)) {
620                 dev_err(bdev->dev, "invalid dma direction\n");
621                 return NULL;
622         }
623
624         /* calculate number of required entries */
625         for_each_sg(sgl, sg, sg_len, i)
626                 num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
627
628         /* allocate enough room to accomodate the number of entries */
629         async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
630                              GFP_NOWAIT);
631
632         if (!async_desc)
633                 return NULL;
634
635         if (flags & DMA_PREP_FENCE)
636                 async_desc->flags |= DESC_FLAG_NWD;
637
638         if (flags & DMA_PREP_INTERRUPT)
639                 async_desc->flags |= DESC_FLAG_EOT;
640
641         async_desc->num_desc = num_alloc;
642         async_desc->curr_desc = async_desc->desc;
643         async_desc->dir = direction;
644
645         /* fill in temporary descriptors */
646         desc = async_desc->desc;
647         for_each_sg(sgl, sg, sg_len, i) {
648                 unsigned int remainder = sg_dma_len(sg);
649                 unsigned int curr_offset = 0;
650
651                 do {
652                         if (flags & DMA_PREP_CMD)
653                                 desc->flags |= cpu_to_le16(DESC_FLAG_CMD);
654
655                         desc->addr = cpu_to_le32(sg_dma_address(sg) +
656                                                  curr_offset);
657
658                         if (remainder > BAM_FIFO_SIZE) {
659                                 desc->size = cpu_to_le16(BAM_FIFO_SIZE);
660                                 remainder -= BAM_FIFO_SIZE;
661                                 curr_offset += BAM_FIFO_SIZE;
662                         } else {
663                                 desc->size = cpu_to_le16(remainder);
664                                 remainder = 0;
665                         }
666
667                         async_desc->length += le16_to_cpu(desc->size);
668                         desc++;
669                 } while (remainder > 0);
670         }
671
672         return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
673 }
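
/*
 * Typical consumer usage (illustrative sketch, not part of this driver):
 * a peripheral driver obtains a channel through the standard dmaengine
 * API and submits a scatterlist, which lands in bam_prep_slave_sg() above:
 *
 *      struct dma_chan *chan = dma_request_chan(dev, "rx");
 *      struct dma_async_tx_descriptor *txd;
 *
 *      dmaengine_slave_config(chan, &cfg);
 *      txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *                                    DMA_PREP_INTERRUPT);
 *      txd->callback = done_fn;        // hypothetical completion hook
 *      dmaengine_submit(txd);
 *      dma_async_issue_pending(chan);
 */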

/**
 * bam_dma_terminate_all - terminate all transactions on a channel
 * @chan: bam dma channel
 *
 * Dequeues and frees all transactions; no completion callbacks are invoked.
 */
static int bam_dma_terminate_all(struct dma_chan *chan)
{
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_async_desc *async_desc, *tmp;
        unsigned long flag;
        LIST_HEAD(head);

        /* remove all transactions, including active transaction */
        spin_lock_irqsave(&bchan->vc.lock, flag);
        /*
         * If we have transactions queued, then some might be committed to the
         * hardware in the desc fifo.  The only way to reset the desc fifo is
         * to do a hardware reset (either by pipe or the entire block).
         * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
         * pipe.  If the pipe is left disabled (default state after pipe reset)
         * and is accessed by a connected hardware engine, a fatal error in
         * the BAM will occur.  There is a small window where this could happen
         * with bam_chan_init_hw(), but it is assumed that the caller has
         * stopped activity on any attached hardware engine.  Make sure to do
         * this first so that the BAM hardware doesn't cause memory corruption
         * by accessing freed resources.
         */
        if (!list_empty(&bchan->desc_list)) {
                async_desc = list_first_entry(&bchan->desc_list,
                                              struct bam_async_desc, desc_node);
                bam_chan_init_hw(bchan, async_desc->dir);
        }

        list_for_each_entry_safe(async_desc, tmp,
                                 &bchan->desc_list, desc_node) {
                list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
                list_del(&async_desc->desc_node);
        }

        vchan_get_all_descriptors(&bchan->vc, &head);
        spin_unlock_irqrestore(&bchan->vc.lock, flag);

        vchan_dma_desc_free_list(&bchan->vc, &head);

        return 0;
}

/**
 * bam_pause - Pause DMA channel
 * @chan: dma channel
 */
static int bam_pause(struct dma_chan *chan)
{
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
        unsigned long flag;
        int ret;

        ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;

        spin_lock_irqsave(&bchan->vc.lock, flag);
        writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
        bchan->paused = 1;
        spin_unlock_irqrestore(&bchan->vc.lock, flag);
        pm_runtime_mark_last_busy(bdev->dev);
        pm_runtime_put_autosuspend(bdev->dev);

        return 0;
}

/**
 * bam_resume - Resume DMA channel operations
 * @chan: dma channel
 */
static int bam_resume(struct dma_chan *chan)
{
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
        unsigned long flag;
        int ret;

        ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;

        spin_lock_irqsave(&bchan->vc.lock, flag);
        writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
        bchan->paused = 0;
        spin_unlock_irqrestore(&bchan->vc.lock, flag);
        pm_runtime_mark_last_busy(bdev->dev);
        pm_runtime_put_autosuspend(bdev->dev);

        return 0;
}

/**
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 *
 * Services the per-pipe interrupt sources, reaping completed descriptors,
 * and returns the set of pending interrupt sources.
 */
static u32 process_channel_irqs(struct bam_device *bdev)
{
        u32 i, srcs, pipe_stts, offset, avail;
        unsigned long flags;
        struct bam_async_desc *async_desc, *tmp;

        srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));

        /* return early if no pipe/channel interrupts are present */
        if (!(srcs & P_IRQ))
                return srcs;

        for (i = 0; i < bdev->num_channels; i++) {
                struct bam_chan *bchan = &bdev->channels[i];

                if (!(srcs & BIT(i)))
                        continue;

                /* clear pipe irq */
                pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));

                writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));

                spin_lock_irqsave(&bchan->vc.lock, flags);

                offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
                                       P_SW_OFSTS_MASK;
                offset /= sizeof(struct bam_desc_hw);

                /* number of descriptors processed by hw and ready to reap */
                avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);

                if (offset < bchan->head)
                        avail--;

                list_for_each_entry_safe(async_desc, tmp,
                                         &bchan->desc_list, desc_node) {
                        /* Not enough data to read */
                        if (avail < async_desc->xfer_len)
                                break;

                        /* manage FIFO */
                        bchan->head += async_desc->xfer_len;
                        bchan->head %= MAX_DESCRIPTORS;

                        async_desc->num_desc -= async_desc->xfer_len;
                        async_desc->curr_desc += async_desc->xfer_len;
                        avail -= async_desc->xfer_len;

                        /*
                         * if complete, process cookie. Otherwise
                         * push back to front of desc_issued so that
                         * it gets restarted by the tasklet
                         */
                        if (!async_desc->num_desc) {
                                vchan_cookie_complete(&async_desc->vd);
                        } else {
                                list_add(&async_desc->vd.node,
                                         &bchan->vc.desc_issued);
                        }
                        list_del(&async_desc->desc_node);
                }

                spin_unlock_irqrestore(&bchan->vc.lock, flags);
        }

        return srcs;
}

/**
 * bam_dma_irq - irq handler for bam controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the bam controller
 */
static irqreturn_t bam_dma_irq(int irq, void *data)
{
        struct bam_device *bdev = data;
        u32 clr_mask = 0, srcs = 0;
        int ret;

        srcs |= process_channel_irqs(bdev);

        /* kick off tasklet to start next dma transfer */
        if (srcs & P_IRQ)
                tasklet_schedule(&bdev->task);

        ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return IRQ_NONE;

        if (srcs & BAM_IRQ) {
                clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));

                /*
                 * don't allow reorder of the various accesses to the BAM
                 * registers
                 */
                mb();

                writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
        }

        pm_runtime_mark_last_busy(bdev->dev);
        pm_runtime_put_autosuspend(bdev->dev);

        return IRQ_HANDLED;
}

/**
 * bam_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_async_desc *async_desc;
        struct virt_dma_desc *vd;
        int ret;
        size_t residue = 0;
        unsigned int i;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        if (!txstate)
                return bchan->paused ? DMA_PAUSED : ret;

        spin_lock_irqsave(&bchan->vc.lock, flags);
        vd = vchan_find_desc(&bchan->vc, cookie);
        if (vd) {
                residue = container_of(vd, struct bam_async_desc, vd)->length;
        } else {
                list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
                        if (async_desc->vd.tx.cookie != cookie)
                                continue;

                        for (i = 0; i < async_desc->num_desc; i++)
                                residue += le16_to_cpu(
                                                async_desc->curr_desc[i].size);
                }
        }

        spin_unlock_irqrestore(&bchan->vc.lock, flags);

        dma_set_residue(txstate, residue);

        if (ret == DMA_IN_PROGRESS && bchan->paused)
                ret = DMA_PAUSED;

        return ret;
}

/**
 * bam_apply_new_config - apply any pending slave configuration
 * @bchan: bam dma channel
 * @dir: DMA direction
 */
static void bam_apply_new_config(struct bam_chan *bchan,
        enum dma_transfer_direction dir)
{
        struct bam_device *bdev = bchan->bdev;
        u32 maxburst;

        if (!bdev->controlled_remotely) {
                if (dir == DMA_DEV_TO_MEM)
                        maxburst = bchan->slave.src_maxburst;
                else
                        maxburst = bchan->slave.dst_maxburst;

                writel_relaxed(maxburst,
                               bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
        }

        bchan->reconfigure = 0;
}

/**
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
 */
static void bam_start_dma(struct bam_chan *bchan)
{
        struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
        struct bam_device *bdev = bchan->bdev;
        struct bam_async_desc *async_desc = NULL;
        struct bam_desc_hw *desc;
        struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
                                        sizeof(struct bam_desc_hw));
        int ret;
        unsigned int avail;
        struct dmaengine_desc_callback cb;

        lockdep_assert_held(&bchan->vc.lock);

        if (!vd)
                return;

        ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return;

        while (vd && !IS_BUSY(bchan)) {
                list_del(&vd->node);

                async_desc = container_of(vd, struct bam_async_desc, vd);

                /* on first use, initialize the channel hardware */
                if (!bchan->initialized)
                        bam_chan_init_hw(bchan, async_desc->dir);

                /* apply new slave config changes, if necessary */
                if (bchan->reconfigure)
                        bam_apply_new_config(bchan, async_desc->dir);

                desc = async_desc->curr_desc;
                avail = CIRC_SPACE(bchan->tail, bchan->head,
                                   MAX_DESCRIPTORS + 1);

                if (async_desc->num_desc > avail)
                        async_desc->xfer_len = avail;
                else
                        async_desc->xfer_len = async_desc->num_desc;

                /* set any special flags on the last descriptor */
                if (async_desc->num_desc == async_desc->xfer_len)
                        desc[async_desc->xfer_len - 1].flags |=
                                                cpu_to_le16(async_desc->flags);

                vd = vchan_next_desc(&bchan->vc);

                dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);

                /*
                 * An interrupt is generated at this desc if:
                 *  - the FIFO is full,
                 *  - there are no more descriptors to add, or
                 *  - a completion callback was requested for this desc,
                 *    in which case BAM delivers the callback and keeps
                 *    processing the following descriptors.
                 */
                if (((avail <= async_desc->xfer_len) || !vd ||
                     dmaengine_desc_callback_valid(&cb)) &&
                    !(async_desc->flags & DESC_FLAG_EOT))
                        desc[async_desc->xfer_len - 1].flags |=
                                cpu_to_le16(DESC_FLAG_INT);

                if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
                        u32 partial = MAX_DESCRIPTORS - bchan->tail;

                        memcpy(&fifo[bchan->tail], desc,
                               partial * sizeof(struct bam_desc_hw));
                        memcpy(fifo, &desc[partial],
                               (async_desc->xfer_len - partial) *
                                sizeof(struct bam_desc_hw));
                } else {
                        memcpy(&fifo[bchan->tail], desc,
                               async_desc->xfer_len *
                               sizeof(struct bam_desc_hw));
                }

                bchan->tail += async_desc->xfer_len;
                bchan->tail %= MAX_DESCRIPTORS;
                list_add_tail(&async_desc->desc_node, &bchan->desc_list);
        }

        /* ensure descriptor writes and dma start not reordered */
        wmb();
        writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
                        bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));

        pm_runtime_mark_last_busy(bdev->dev);
        pm_runtime_put_autosuspend(bdev->dev);
}

/**
 * dma_tasklet - DMA IRQ tasklet
 * @t: tasklet argument (bam controller structure)
 *
 * Goes through all channels and kicks off any pending transactions
 */
static void dma_tasklet(struct tasklet_struct *t)
{
        struct bam_device *bdev = from_tasklet(bdev, t, task);
        struct bam_chan *bchan;
        unsigned long flags;
        unsigned int i;

        /* go through the channels and kick off transactions */
        for (i = 0; i < bdev->num_channels; i++) {
                bchan = &bdev->channels[i];
                spin_lock_irqsave(&bchan->vc.lock, flags);

                if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
                        bam_start_dma(bchan);
                spin_unlock_irqrestore(&bchan->vc.lock, flags);
        }
}

/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Starts a pending transaction directly if the channel is idle
 */
static void bam_issue_pending(struct dma_chan *chan)
{
        struct bam_chan *bchan = to_bam_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&bchan->vc.lock, flags);

        /* if work pending and idle, start a transaction */
        if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
                bam_start_dma(bchan);

        spin_unlock_irqrestore(&bchan->vc.lock, flags);
}

/**
 * bam_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 */
static void bam_dma_free_desc(struct virt_dma_desc *vd)
{
        struct bam_async_desc *async_desc = container_of(vd,
                        struct bam_async_desc, vd);

        kfree(async_desc);
}

static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
                struct of_dma *of)
{
        struct bam_device *bdev = container_of(of->of_dma_data,
                                        struct bam_device, common);
        unsigned int request;

        if (dma_spec->args_count != 1)
                return NULL;

        request = dma_spec->args[0];
        if (request >= bdev->num_channels)
                return NULL;

        return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
}
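
/*
 * Device tree usage (illustrative): bam_dma_xlate() expects exactly one
 * cell, the pipe/channel index, so a client node would reference the
 * controller as, e.g.:
 *
 *      dmas = <&blsp_dma 9>, <&blsp_dma 8>;
 *      dma-names = "tx", "rx";
 *
 * where blsp_dma is a hypothetical label for a BAM controller node.
 */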

/**
 * bam_init - initialize global BAM registers
 * @bdev: bam device
 *
 * Initialization helper for global bam registers
 */
static int bam_init(struct bam_device *bdev)
{
        u32 val;

        /* read revision and configuration information */
        if (!bdev->num_ees) {
                val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
                bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
        }

        /* check that configured EE is within range */
        if (bdev->ee >= bdev->num_ees)
                return -EINVAL;

        if (!bdev->num_channels) {
                val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
                bdev->num_channels = val & BAM_NUM_PIPES_MASK;
        }

        if (bdev->controlled_remotely)
                return 0;

        /* s/w reset bam; after reset all pipes are disabled and idle */
        val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
        val |= BAM_SW_RST;
        writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
        val &= ~BAM_SW_RST;
        writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

        /* make sure previous stores are visible before enabling BAM */
        wmb();

        /* enable bam */
        val |= BAM_EN;
        writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

        /* set descriptor threshold, start with 4 bytes */
        writel_relaxed(DEFAULT_CNT_THRSHLD,
                        bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

        /* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
        writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));

        /* enable irqs for errors */
        writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
                        bam_addr(bdev, 0, BAM_IRQ_EN));

        /* unmask global bam interrupt */
        writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

        return 0;
}

static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
        u32 index)
{
        bchan->id = index;
        bchan->bdev = bdev;

        vchan_init(&bchan->vc, &bdev->common);
        bchan->vc.desc_free = bam_dma_free_desc;
        INIT_LIST_HEAD(&bchan->desc_list);
}

static const struct of_device_id bam_of_match[] = {
        { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
        { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
        { .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
        {}
};

MODULE_DEVICE_TABLE(of, bam_of_match);

static int bam_dma_probe(struct platform_device *pdev)
{
        struct bam_device *bdev;
        const struct of_device_id *match;
        struct resource *iores;
        int ret, i;

        bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
        if (!bdev)
                return -ENOMEM;

        bdev->dev = &pdev->dev;

        match = of_match_node(bam_of_match, pdev->dev.of_node);
        if (!match) {
                dev_err(&pdev->dev, "Unsupported BAM module\n");
                return -ENODEV;
        }

        bdev->layout = match->data;

        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
        if (IS_ERR(bdev->regs))
                return PTR_ERR(bdev->regs);

        bdev->irq = platform_get_irq(pdev, 0);
        if (bdev->irq < 0)
                return bdev->irq;

        ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
        if (ret) {
                dev_err(bdev->dev, "Execution environment unspecified\n");
                return ret;
        }

        bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
                                                "qcom,controlled-remotely");

        if (bdev->controlled_remotely) {
                ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
                                           &bdev->num_channels);
                if (ret)
                        dev_err(bdev->dev, "num-channels unspecified in dt\n");

                ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
                                           &bdev->num_ees);
                if (ret)
                        dev_err(bdev->dev, "num-ees unspecified in dt\n");
        }

        if (bdev->controlled_remotely)
                bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
        else
                bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");

        if (IS_ERR(bdev->bamclk))
                return PTR_ERR(bdev->bamclk);

        ret = clk_prepare_enable(bdev->bamclk);
        if (ret) {
                dev_err(bdev->dev, "failed to prepare/enable clock\n");
                return ret;
        }

        ret = bam_init(bdev);
        if (ret)
                goto err_disable_clk;

        tasklet_setup(&bdev->task, dma_tasklet);

        bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
                                sizeof(*bdev->channels), GFP_KERNEL);

        if (!bdev->channels) {
                ret = -ENOMEM;
                goto err_tasklet_kill;
        }

        /* allocate and initialize channels */
        INIT_LIST_HEAD(&bdev->common.channels);

        for (i = 0; i < bdev->num_channels; i++)
                bam_channel_init(bdev, &bdev->channels[i], i);

        ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
                        IRQF_TRIGGER_HIGH, "bam_dma", bdev);
        if (ret)
                goto err_bam_channel_exit;

        /* set max dma segment size */
        bdev->common.dev = bdev->dev;
        ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
        if (ret) {
                dev_err(bdev->dev, "cannot set maximum segment size\n");
                goto err_bam_channel_exit;
        }

        platform_set_drvdata(pdev, bdev);

        /* set capabilities */
        dma_cap_zero(bdev->common.cap_mask);
        dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);

        /* initialize dmaengine apis */
        bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
        bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
        bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
        bdev->common.device_alloc_chan_resources = bam_alloc_chan;
        bdev->common.device_free_chan_resources = bam_free_chan;
        bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
        bdev->common.device_config = bam_slave_config;
        bdev->common.device_pause = bam_pause;
        bdev->common.device_resume = bam_resume;
        bdev->common.device_terminate_all = bam_dma_terminate_all;
        bdev->common.device_issue_pending = bam_issue_pending;
        bdev->common.device_tx_status = bam_tx_status;
        bdev->common.dev = bdev->dev;

        ret = dma_async_device_register(&bdev->common);
        if (ret) {
                dev_err(bdev->dev, "failed to register dma async device\n");
                goto err_bam_channel_exit;
        }

        ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
                                        &bdev->common);
        if (ret)
                goto err_unregister_dma;

        if (!bdev->bamclk) {
                pm_runtime_disable(&pdev->dev);
                return 0;
        }

        pm_runtime_irq_safe(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        return 0;

err_unregister_dma:
        dma_async_device_unregister(&bdev->common);
err_bam_channel_exit:
        for (i = 0; i < bdev->num_channels; i++)
                tasklet_kill(&bdev->channels[i].vc.task);
err_tasklet_kill:
        tasklet_kill(&bdev->task);
err_disable_clk:
        clk_disable_unprepare(bdev->bamclk);

        return ret;
}

static int bam_dma_remove(struct platform_device *pdev)
{
        struct bam_device *bdev = platform_get_drvdata(pdev);
        u32 i;

        pm_runtime_force_suspend(&pdev->dev);

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&bdev->common);

        /* mask all interrupts for this execution environment */
        writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

        devm_free_irq(bdev->dev, bdev->irq, bdev);

        for (i = 0; i < bdev->num_channels; i++) {
                bam_dma_terminate_all(&bdev->channels[i].vc.chan);
                tasklet_kill(&bdev->channels[i].vc.task);

                if (!bdev->channels[i].fifo_virt)
                        continue;

                dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
                            bdev->channels[i].fifo_virt,
                            bdev->channels[i].fifo_phys);
        }

        tasklet_kill(&bdev->task);

        clk_disable_unprepare(bdev->bamclk);

        return 0;
}

static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
{
        struct bam_device *bdev = dev_get_drvdata(dev);

        clk_disable(bdev->bamclk);

        return 0;
}

static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
{
        struct bam_device *bdev = dev_get_drvdata(dev);
        int ret;

        ret = clk_enable(bdev->bamclk);
        if (ret < 0) {
                dev_err(dev, "clk_enable failed: %d\n", ret);
                return ret;
        }

        return 0;
}

static int __maybe_unused bam_dma_suspend(struct device *dev)
{
        struct bam_device *bdev = dev_get_drvdata(dev);

        if (bdev->bamclk) {
                pm_runtime_force_suspend(dev);
                clk_unprepare(bdev->bamclk);
        }

        return 0;
}

static int __maybe_unused bam_dma_resume(struct device *dev)
{
        struct bam_device *bdev = dev_get_drvdata(dev);
        int ret;

        if (bdev->bamclk) {
                ret = clk_prepare(bdev->bamclk);
                if (ret)
                        return ret;

                pm_runtime_force_resume(dev);
        }

        return 0;
}

static const struct dev_pm_ops bam_dma_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
        SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
                                NULL)
};

static struct platform_driver bam_dma_driver = {
        .probe = bam_dma_probe,
        .remove = bam_dma_remove,
        .driver = {
                .name = "bam-dma-engine",
                .pm = &bam_dma_pm_ops,
                .of_match_table = bam_of_match,
        },
};

module_platform_driver(bam_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");