// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"
#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI			(STM32_DMA_TCI \
					 | STM32_DMA_TEI \
					 | STM32_DMA_DMEI \
					 | STM32_DMA_FEI)
/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n)		((n & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)		((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)		((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)		((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)		((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)		((n & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)		((n & 0x3) << 6)
#define STM32_DMA_SCR_TRBUFF		BIT(20) /* Bufferable transfer for USART/UART */
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					 | STM32_DMA_SCR_MINC \
					 | STM32_DMA_SCR_PINCOS \
					 | STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					 | STM32_DMA_SCR_TEIE \
					 | STM32_DMA_SCR_DMEIE)
/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)		(n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					 | STM32_DMA_SFCR_DMDIS)
/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02
/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL		0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL		0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL		0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL			0x03
#define STM32_DMA_FIFO_THRESHOLD_NONE			0x04
#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * Valid transfer starts from @0 to @0xFFFE leading to unaligned scatter
 * gather at boundary. Thus it's safer to round down this value on FIFO
 * size (16 Bytes)
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16
/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK	BIT(2)
#define STM32_DMA_DIRECT_MODE_GET(n)	(((n) & STM32_DMA_DIRECT_MODE_MASK) >> 2)
#define STM32_DMA_ALT_ACK_MODE_MASK	BIT(4)
#define STM32_DMA_ALT_ACK_MODE_GET(n)	(((n) & STM32_DMA_ALT_ACK_MODE_MASK) >> 4)
enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};
/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};
struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
	enum dma_status status;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};
static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}
static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}
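
/*
 * Pick the widest memory bus width usable for a transfer: start from the
 * widest width the FIFO threshold allows, then halve it until both the
 * buffer length and the buffer address are aligned on it. For example, a
 * 6-byte buffer with a FULL threshold starts at 4 bytes, fails 6 % 4, and
 * settles on 2 bytes (assuming a 2-byte aligned address).
 */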
static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       dma_addr_t buf_addr,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	if (buf_addr & (max_width - 1))
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	return max_width;
}
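
/*
 * A burst/threshold pair is only allowed if the FIFO payload at the chosen
 * threshold splits into whole bursts. Worked example: FIFO size 16 bytes,
 * width 2 bytes, threshold HALFFULL (1) gives (16 / 2) * (1 + 1) / 4 = 4
 * beats, so a burst of 4 divides it evenly and is allowed.
 */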
static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If number of beats fit in several whole bursts
			 * this configuration is allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	/* If FIFO direct mode, burst is not possible */
	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	/*
	 * Buffer or period length has to be aligned on FIFO depth.
	 * Otherwise bytes may be stuck within FIFO at buffer or period
	 * length boundary.
	 */
	return ((buf_len % ((threshold + 1) * 4)) == 0);
}
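
/*
 * Find the largest burst (capped at max_burst) that both fits in buf_len
 * and is allowed by the FIFO threshold, halving down to STM32_DMA_MIN_BURST
 * before giving up and falling back to single transfers (burst = 0).
 */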
static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else {
			best_burst = 0;
			break;
		}
	}

	return best_burst;
}
static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}
static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}
static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}
static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}
static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}
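
/*
 * Clearing the EN bit only requests the stop; the stream stays active until
 * the ongoing beat completes, so poll EN until the hardware confirms it is
 * really disabled.
 */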
static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, id, reg;

	id = chan->id;
	reg = STM32_DMA_SCR(id);
	dma_scr = stm32_dma_read(dmadev, reg);

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, reg, dma_scr);

		return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
					dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
					10, 1000000);
	}

	return 0;
}
static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
	chan->status = DMA_COMPLETE;
}
static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->desc) {
		dma_cookie_complete(&chan->desc->vdesc.tx);
		vchan_terminate_vdesc(&chan->desc->vdesc);
		if (chan->busy)
			stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}
static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR:   0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR:  0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR:  0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
}
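
/* Advance to the next sg entry, wrapping around in cyclic mode */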
static void stm32_dma_sg_inc(struct stm32_dma_chan *chan)
{
	chan->next_sg++;
	if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs))
		chan->next_sg = 0;
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	reg->dma_scr &= ~STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	stm32_dma_sg_inc(chan);

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	chan->busy = true;
	chan->status = DMA_IN_PROGRESS;
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	sg_req = &chan->desc->sg_req[chan->next_sg];

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
		stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
		dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
			stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
	} else {
		dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
		stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
		dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
			stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
	}
}
static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr;

	/*
	 * Read and store current remaining data items and peripheral/memory addresses to be
	 * updated on resume
	 */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	/*
	 * Transfer can be paused while between a previous resume and reconfiguration on transfer
	 * complete. If transfer is cyclic and CIRC and DBM have been deactivated for resume, need
	 * to set it here in SCR backup to ensure a good reconfiguration on transfer complete.
	 */
	if (chan->desc && chan->desc->cyclic) {
		if (chan->desc->num_sgs == 1)
			dma_scr |= STM32_DMA_SCR_CIRC;
		else
			dma_scr |= STM32_DMA_SCR_DBM;
	}
	chan->chan_reg.dma_scr = dma_scr;

	/*
	 * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, otherwise
	 * on resume NDTR autoreload value will be wrong (lower than the initial period length)
	 */
	if (chan->desc && chan->desc->cyclic) {
		dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC);
		stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	}

	chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
}
static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, status, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (!chan->next_sg)
		sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
	else
		sg_req = &chan->desc->sg_req[chan->next_sg - 1];

	/* Reconfigure NDTR with the initial value */
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr);

	/* Restore SPAR */
	stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar);

	/* Restore SM0AR/SM1AR whatever DBM/CT as they may have been modified */
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar);

	/* Reactivate CIRC/DBM if needed */
	if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) {
		dma_scr |= STM32_DMA_SCR_DBM;
		/* Restore CT */
		if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT)
			dma_scr &= ~STM32_DMA_SCR_CT;
		else
			dma_scr |= STM32_DMA_SCR_CT;
	} else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) {
		dma_scr |= STM32_DMA_SCR_CIRC;
	}
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);

	stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);

	dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
}
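
/*
 * Transfer Complete handling: in cyclic mode fire the period callback and
 * prepare the next period (or redo the post pause/resume reconfiguration
 * while CIRC/DBM are temporarily off); otherwise complete the cookie and
 * chain the next queued descriptor, if any.
 */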
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
{
	if (!chan->desc)
		return;

	if (chan->desc->cyclic) {
		vchan_cyclic_callback(&chan->desc->vdesc);
		stm32_dma_sg_inc(chan);
		/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
		if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
			stm32_dma_post_resume_reconfigure(chan);
		else if (scr & STM32_DMA_SCR_DBM)
			stm32_dma_configure_next_sg(chan);
	} else {
		chan->busy = false;
		chan->status = DMA_COMPLETE;
		if (chan->next_sg == chan->desc->num_sgs) {
			vchan_cookie_complete(&chan->desc->vdesc);
			chan->desc = NULL;
		}
		stm32_dma_start_transfer(chan);
	}
}
static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr, sfcr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (sfcr & STM32_DMA_SFCR_FEIE) {
			if (!(scr & STM32_DMA_SCR_EN) &&
			    !(status & STM32_DMA_TCI))
				dev_err(chan2dev(chan), "FIFO Error\n");
			else
				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
		}
	}
	if (status & STM32_DMA_DMEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
		status &= ~STM32_DMA_DMEI;
		if (scr & STM32_DMA_SCR_DMEIE) /* DMEIE is an SCR bit, not an SFCR one */
			dev_dbg(chan2dev(chan), "Direct mode overrun\n");
	}

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE) {
			if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN))
				stm32_dma_handle_chan_paused(chan);
			else
				stm32_dma_handle_chan_done(chan, scr);
		}
		status &= ~STM32_DMA_TCI;
	}

	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}

	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}
static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static int stm32_dma_pause(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	int ret;

	if (chan->status != DMA_IN_PROGRESS)
		return -EPERM;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	ret = stm32_dma_disable_chan(chan);
	/*
	 * A transfer complete flag is set to indicate the end of transfer due to the stream
	 * interruption, so wait for interrupt
	 */
	if (!ret)
		chan->status = DMA_PAUSED;
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return ret;
}
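
/*
 * Resuming restarts the stream where it stopped: the number of data items
 * already transferred is ndtr(initial) - ndtr(paused), converted to bytes by
 * shifting with the PSIZE encoding (e.g. initial NDTR 100, paused NDTR 40,
 * 32-bit PSIZE gives an offset of (100 - 40) << 2 = 240 bytes), and the
 * incremented address pointers are rebased by that offset.
 */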
static int stm32_dma_resume(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_chan_reg chan_reg = chan->chan_reg;
	u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar;
	struct stm32_dma_sg_req *sg_req;
	unsigned long flags;

	if (chan->status != DMA_PAUSED)
		return -EPERM;

	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
	if (WARN_ON(scr & STM32_DMA_SCR_EN))
		return -EPERM;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	/* sg_reg[prev_sg] contains original ndtr, sm0ar and sm1ar before pausing the transfer */
	if (!chan->next_sg)
		sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
	else
		sg_req = &chan->desc->sg_req[chan->next_sg - 1];

	ndtr = sg_req->chan_reg.dma_sndtr;
	offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr);
	spar = sg_req->chan_reg.dma_spar;
	sm0ar = sg_req->chan_reg.dma_sm0ar;
	sm1ar = sg_req->chan_reg.dma_sm1ar;

	/*
	 * The peripheral and/or memory addresses have to be updated in order to adjust the
	 * address pointers. Need to check increment.
	 */
	if (chan_reg.dma_scr & STM32_DMA_SCR_PINC)
		stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset);
	else
		stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar);

	if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC))
		offset = 0;

	/*
	 * In case of DBM, the current target could be SM1AR.
	 * Need to temporarily deactivate CIRC/DBM to finish the current transfer, so
	 * SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1.
	 */
	if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT))
		stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset);
	else
		stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset);

	/* NDTR must be restored otherwise internal HW counter won't be correctly reset */
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr);

	/*
	 * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt,
	 * otherwise NDTR autoreload value will be wrong (lower than the initial period length)
	 */
	if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))
		chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM);

	if (chan_reg.dma_scr & STM32_DMA_SCR_DBM)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* The stream may then be re-enabled to restart transfer from the point it was stopped */
	chan->status = DMA_IN_PROGRESS;
	chan_reg.dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);

	return 0;
}
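
/*
 * Compute the SCR/SFCR settings (bus widths, burst sizes, FIFO threshold,
 * direction and peripheral address) for one transfer leg, honouring the
 * dma_slave_config on the peripheral side and deriving the best width/burst
 * on the memory side from the buffer length and alignment.
 */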
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len, dma_addr_t buf_addr)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, fifoth;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	fifoth = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
							 fifoth);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/*
		 * Set memory burst size - burst not possible if address is not aligned on
		 * the address boundary equal to the size of the transfer
		 */
		if (buf_addr & (buf_len - 1))
			src_maxburst = 1;
		else
			src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
							 fifoth);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/*
		 * Set memory burst size - burst not possible if address is not aligned on
		 * the address boundary equal to the size of the transfer
		 */
		if (buf_addr & (buf_len - 1))
			dst_maxburst = 1;
		else
			dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}
static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}
static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg),
					       sg_dma_address(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * We allow to take more number of requests till DMA is
	 * not started. The driver will loop over all requests.
	 * Once DMA is started then new requests can be queued only after
	 * terminating the DMA.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
				       buf_addr);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len) {
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	} else {
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
	}

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
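
/*
 * NDTR counts remaining data items in PSIZE units, so the remaining byte
 * count is ndtr shifted by the PSIZE encoding (e.g. NDTR = 0x10 with 16-bit
 * PSIZE is 0x10 << 1 = 32 bytes).
 */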
static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}
/**
 * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
 * @chan: dma channel
 *
 * This function is called when IRQs are disabled and checks that the hardware
 * has not switched on the next transfer in double buffer mode. The test is
 * done by comparing the next_sg memory address with the hardware related
 * register (based on CT bit value).
 *
 * Returns true if expected current transfer is still running or double
 * buffer mode is not activated.
 */
static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_smar, id, period_len;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	/* In cyclic CIRC but not DBM, CT is not used */
	if (!(dma_scr & STM32_DMA_SCR_DBM))
		return true;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	period_len = sg_req->len;

	/* DBM - take care of a previous pause/resume not yet post reconfigured */
	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		/*
		 * If transfer has been pause/resumed,
		 * SM0AR is in the range of [SM0AR:SM0AR+period_len]
		 */
		return (dma_smar >= sg_req->chan_reg.dma_sm0ar &&
			dma_smar < sg_req->chan_reg.dma_sm0ar + period_len);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
	/*
	 * If transfer has been pause/resumed,
	 * SM1AR is in the range of [SM1AR:SM1AR+period_len]
	 */
	return (dma_smar >= sg_req->chan_reg.dma_sm1ar &&
		dma_smar < sg_req->chan_reg.dma_sm1ar + period_len);
}
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue;
	u32 n_sg = next_sg;
	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
	int i;

	/*
	 * Calculate the residue means compute the descriptors
	 * information:
	 * - the sg_req currently transferred
	 * - the Hardware remaining position in this sg (NDTR bits field).
	 *
	 * A race condition may occur if DMA is running in cyclic or double
	 * buffer mode, since the DMA register are automatically reloaded at end
	 * of period transfer. The hardware may have switched to the next
	 * transfer (CT bit updated) just before the position (SxNDTR reg) is
	 * read.
	 * In this case the SxNDTR reg could (or not) correspond to the new
	 * transfer position, and not the expected one.
	 * The strategy implemented in the stm32 driver is to:
	 *  - read the SxNDTR register
	 *  - crosscheck that hardware is still in current transfer.
	 * In case of switch, we can assume that the DMA is at the beginning of
	 * the next transfer. So we approximate the residue in consequence, by
	 * pointing on the beginning of next transfer.
	 *
	 * This race condition doesn't apply for non-cyclic mode, as double
	 * buffer is not used. In such situation registers are updated by the
	 * software.
	 */

	residue = stm32_dma_get_remaining_bytes(chan);

	if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
		n_sg++;
		if (n_sg == chan->desc->num_sgs)
			n_sg = 0;
		residue = sg_req->len;
	}

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes
	 * from NDTR,
	 * else for all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining
	 * periods/sg to be transferred
	 */
	if (!chan->desc->cyclic || n_sg != 0)
		for (i = n_sg; i < desc->num_sgs; i++)
			residue += desc->sg_req[i].len;

	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}
static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	status = chan->status;

	if (!state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}
static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;

	ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
	if (ret < 0)
		return ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		pm_runtime_put(dmadev->ddev.dev);

	return ret;
}
static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	pm_runtime_put(dmadev->ddev.dev);

	vchan_free_chan_resources(to_virt_chan(c));
	stm32_dma_clear_reg(&chan->chan_reg);
	chan->threshold = 0;
}
static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}
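
/*
 * Apply the firmware-provided configuration (stream config bits, request
 * line, FIFO threshold and features) decoded from the four DT cells by
 * stm32_dma_of_xlate() below.
 */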
static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
	if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
		chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
	if (STM32_DMA_ALT_ACK_MODE_GET(cfg->features))
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}
static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);
static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	struct reset_control *rst;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto err_clk;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_pause = stm32_dma_pause;
	dd->device_resume = stm32_dma_resume;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->max_sg_burst = STM32_DMA_ALIGNED_MAX_DATA_ITEMS;
	dd->descriptor_reuse = true;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		goto err_clk;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			goto err_unregister;
		chan->irq = ret;

		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
err_clk:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}
#ifdef CONFIG_PM
static int stm32_dma_runtime_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_dma_runtime_resume(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif
#ifdef CONFIG_PM_SLEEP
static int stm32_dma_pm_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int id, ret, scr;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
		scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
		if (scr & STM32_DMA_SCR_EN) {
			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
			return -EBUSY;
		}
	}

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_dma_pm_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif
static const struct dev_pm_ops stm32_dma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume)
	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
			   stm32_dma_runtime_resume, NULL)
};
static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
		.pm = &stm32_dma_pm_ops,
	},
	.probe = stm32_dma_probe,
};
static int __init stm32_dma_init(void)
{
	return platform_driver_register(&stm32_dma_driver);
}
subsys_initcall(stm32_dma_init);