drivers/spi/spi-dw-dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define DW_SPI_RX_BUSY          0
#define DW_SPI_RX_BURST_LEVEL   16
#define DW_SPI_TX_BUSY          1
#define DW_SPI_TX_BURST_LEVEL   16

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
        struct dw_dma_slave *s = param;

        if (s->dma_dev != chan->device->dev)
                return false;

        chan->private = s;
        return true;
}

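/*
 * Pick the Rx/Tx DMA burst lengths from the controller FIFO depth and the
 * DMA channel capabilities, then program the DMA request thresholds
 * (DMARDLR/DMATDLR) accordingly. As an illustration (assuming a 16-entry
 * FIFO and a DMA engine that reports no max_burst limit): def_burst = 8,
 * so rxburst = txburst = 8, DMARDLR is written with 7 and DMATDLR with 8.
 */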
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
        struct dma_slave_caps caps;
        u32 max_burst, def_burst;
        int ret;

        def_burst = dws->fifo_len / 2;

        ret = dma_get_slave_caps(dws->rxchan, &caps);
        if (!ret && caps.max_burst)
                max_burst = caps.max_burst;
        else
                max_burst = DW_SPI_RX_BURST_LEVEL;

        dws->rxburst = min(max_burst, def_burst);
        dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

        ret = dma_get_slave_caps(dws->txchan, &caps);
        if (!ret && caps.max_burst)
                max_burst = caps.max_burst;
        else
                max_burst = DW_SPI_TX_BURST_LEVEL;

        /*
         * Having the Rx DMA channel serviced with higher priority than the
         * Tx DMA channel might not be enough to provide a well-balanced
         * DMA-based SPI transfer interface. There may still be moments when
         * the Tx DMA channel is occasionally handled faster than the Rx DMA
         * channel. That will eventually cause an SPI Rx FIFO overflow if the
         * SPI bus speed is high enough to fill the SPI Rx FIFO before it is
         * drained by the Rx DMA channel. To fix the problem, the Tx DMA
         * activity is intentionally slowed down by limiting the effective
         * SPI Tx FIFO depth to twice the Tx burst length.
         */
        dws->txburst = min(max_burst, def_burst);
        dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
{
        struct dma_slave_caps tx = {0}, rx = {0};

        dma_get_slave_caps(dws->txchan, &tx);
        dma_get_slave_caps(dws->rxchan, &rx);

        if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
                dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
        else if (tx.max_sg_burst > 0)
                dws->dma_sg_burst = tx.max_sg_burst;
        else if (rx.max_sg_burst > 0)
                dws->dma_sg_burst = rx.max_sg_burst;
        else
                dws->dma_sg_burst = 0;
}

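/*
 * Intel Medfield-specific DMA init: the DMA controller sits behind its own
 * PCI function (8086:0827), so the channels are requested with a
 * dw_dma_slave filter instead of the DT/ACPI lookup used by the generic
 * init path further below.
 */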
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
        struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
        struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
        struct pci_dev *dma_dev;
        dma_cap_mask_t mask;

        /*
         * Get the PCI device of the DMA controller. Currently it can only
         * be the Medfield DMA controller.
         */
        dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
        if (!dma_dev)
                return -ENODEV;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* 1. Init rx channel */
        rx->dma_dev = &dma_dev->dev;
        dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
        if (!dws->rxchan)
                goto err_exit;

        /* 2. Init tx channel */
        tx->dma_dev = &dma_dev->dev;
        dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
        if (!dws->txchan)
                goto free_rxchan;

        dws->master->dma_rx = dws->rxchan;
        dws->master->dma_tx = dws->txchan;

        init_completion(&dws->dma_completion);

        dw_spi_dma_maxburst_init(dws);

        dw_spi_dma_sg_burst_init(dws);

        return 0;

free_rxchan:
        dma_release_channel(dws->rxchan);
        dws->rxchan = NULL;
err_exit:
        return -EBUSY;
}

static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
        dws->rxchan = dma_request_slave_channel(dev, "rx");
        if (!dws->rxchan)
                return -ENODEV;

        dws->txchan = dma_request_slave_channel(dev, "tx");
        if (!dws->txchan) {
                dma_release_channel(dws->rxchan);
                dws->rxchan = NULL;
                return -ENODEV;
        }

        dws->master->dma_rx = dws->rxchan;
        dws->master->dma_tx = dws->txchan;

        init_completion(&dws->dma_completion);

        dw_spi_dma_maxburst_init(dws);

        dw_spi_dma_sg_burst_init(dws);

        return 0;
}

static void dw_spi_dma_exit(struct dw_spi *dws)
{
        if (dws->txchan) {
                dmaengine_terminate_sync(dws->txchan);
                dma_release_channel(dws->txchan);
        }

        if (dws->rxchan) {
                dmaengine_terminate_sync(dws->rxchan);
                dma_release_channel(dws->rxchan);
        }
}

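/*
 * Invoked from the controller's IRQ path while a DMA transfer is in flight.
 * The only interrupts left unmasked by dw_spi_dma_setup() are the FIFO
 * overflow/underflow errors, so check the status and wake up whoever is
 * waiting on the DMA completion.
 */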
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
        dw_spi_check_status(dws, false);

        complete(&dws->dma_completion);

        return IRQ_HANDLED;
}

static bool dw_spi_can_dma(struct spi_controller *master,
                           struct spi_device *spi, struct spi_transfer *xfer)
{
        struct dw_spi *dws = spi_controller_get_devdata(master);

        return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
        if (n_bytes == 1)
                return DMA_SLAVE_BUSWIDTH_1_BYTE;
        else if (n_bytes == 2)
                return DMA_SLAVE_BUSWIDTH_2_BYTES;

        return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

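/*
 * The wait below times out after roughly twice the nominal transfer time
 * plus a 200 ms margin. With illustrative numbers only: 1024 bytes at
 * 1 MHz take about 8 ms on the wire, giving a timeout of about
 * 8 + 8 + 200 = 216 ms.
 */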
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
        unsigned long long ms;

        ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
        do_div(ms, speed);
        ms += ms + 200;

        if (ms > UINT_MAX)
                ms = UINT_MAX;

        ms = wait_for_completion_timeout(&dws->dma_completion,
                                         msecs_to_jiffies(ms));

        if (ms == 0) {
                dev_err(&dws->master->cur_msg->spi->dev,
                        "DMA transaction timed out\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
        return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        int retry = DW_SPI_WAIT_RETRIES;
        struct spi_delay delay;
        u32 nents;

        nents = dw_readl(dws, DW_SPI_TXFLR);
        delay.unit = SPI_DELAY_UNIT_SCK;
        delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

        while (dw_spi_dma_tx_busy(dws) && retry--)
                spi_delay_exec(&delay, xfer);

        if (retry < 0) {
                dev_err(&dws->master->dev, "Tx hanged up\n");
                return -EIO;
        }

        return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Tx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
        struct dw_spi *dws = arg;

        clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
        if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
                return;

        complete(&dws->dma_completion);
}

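/*
 * The Tx slave config points the DMA engine at the peripheral address held
 * in dws->dma_addr (the controller's data register), with the burst length
 * picked by dw_spi_dma_maxburst_init() and the register access width
 * matching the transfer word size.
 */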
static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
        struct dma_slave_config txconf;

        memset(&txconf, 0, sizeof(txconf));
        txconf.direction = DMA_MEM_TO_DEV;
        txconf.dst_addr = dws->dma_addr;
        txconf.dst_maxburst = dws->txburst;
        txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        txconf.device_fc = false;

        return dmaengine_slave_config(dws->txchan, &txconf);
}

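/*
 * Prepare and submit a single Tx slave SG descriptor. On a submission error
 * the channel is cleaned up with dmaengine_terminate_sync(); otherwise the
 * Tx-busy flag is set so the Rx completion callback knows the transfer isn't
 * finished until both channels are done.
 */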
static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
                                unsigned int nents)
{
        struct dma_async_tx_descriptor *txdesc;
        dma_cookie_t cookie;
        int ret;

        txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
                                         DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txdesc)
                return -ENOMEM;

        txdesc->callback = dw_spi_dma_tx_done;
        txdesc->callback_param = dws;

        cookie = dmaengine_submit(txdesc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dmaengine_terminate_sync(dws->txchan);
                return ret;
        }

        set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);

        return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
        return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
        int retry = DW_SPI_WAIT_RETRIES;
        struct spi_delay delay;
        unsigned long ns, us;
        u32 nents;

        /*
         * It's unlikely that the DMA engine is still fetching data, but if
         * it is, let's give it some reasonable time. The timeout is based on
         * the synchronous APB/SSI reference clock rate, the number of data
         * entries left in the Rx FIFO, and the number of clock periods
         * normally needed for a single APB read/write transaction without
         * the PREADY signal utilized (which is true for the DW APB SSI
         * controller).
         */
        nents = dw_readl(dws, DW_SPI_RXFLR);
        ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
        if (ns <= NSEC_PER_USEC) {
                delay.unit = SPI_DELAY_UNIT_NSECS;
                delay.value = ns;
        } else {
                us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
                delay.unit = SPI_DELAY_UNIT_USECS;
                delay.value = clamp_val(us, 0, USHRT_MAX);
        }

        while (dw_spi_dma_rx_busy(dws) && retry--)
                spi_delay_exec(&delay, NULL);

        if (retry < 0) {
                dev_err(&dws->master->dev, "Rx hanged up\n");
                return -EIO;
        }

        return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Rx channel
 * callback clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
        struct dw_spi *dws = arg;

        clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
        if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
                return;

        complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
        struct dma_slave_config rxconf;

        memset(&rxconf, 0, sizeof(rxconf));
        rxconf.direction = DMA_DEV_TO_MEM;
        rxconf.src_addr = dws->dma_addr;
        rxconf.src_maxburst = dws->rxburst;
        rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        rxconf.device_fc = false;

        return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
                                unsigned int nents)
{
        struct dma_async_tx_descriptor *rxdesc;
        dma_cookie_t cookie;
        int ret;

        rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxdesc)
                return -ENOMEM;

        rxdesc->callback = dw_spi_dma_rx_done;
        rxdesc->callback_param = dws;

        cookie = dmaengine_submit(rxdesc);
        ret = dma_submit_error(cookie);
        if (ret) {
                dmaengine_terminate_sync(dws->rxchan);
                return ret;
        }

        set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);

        return 0;
}

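/*
 * Set up both channels for the upcoming transfer: program the slave configs,
 * enable the Tx (and, for full-duplex transfers, Rx) DMA handshake in DMACR,
 * unmask the FIFO error interrupts and install the DMA transfer handler.
 * A Tx buffer is mandatory for this DMA path; Rx-only transfers are rejected
 * with -EINVAL.
 */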
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
        u16 imr, dma_ctrl;
        int ret;

        if (!xfer->tx_buf)
                return -EINVAL;

        /* Setup DMA channels */
        ret = dw_spi_dma_config_tx(dws);
        if (ret)
                return ret;

        if (xfer->rx_buf) {
                ret = dw_spi_dma_config_rx(dws);
                if (ret)
                        return ret;
        }

        /* Set the DMA handshaking interface */
        dma_ctrl = DW_SPI_DMACR_TDMAE;
        if (xfer->rx_buf)
                dma_ctrl |= DW_SPI_DMACR_RDMAE;
        dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

        /* Set the interrupt mask */
        imr = DW_SPI_INT_TXOI;
        if (xfer->rx_buf)
                imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
        dw_spi_umask_intr(dws, imr);

        reinit_completion(&dws->dma_completion);

        dws->transfer_handler = dw_spi_dma_transfer_handler;

        return 0;
}

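/*
 * Fast path: hand the whole Tx and Rx SG lists to the DMA engine in one go
 * and wait for completion. Whatever happens, DMACR is cleared on the way out
 * so the controller stops issuing DMA requests.
 */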
static int dw_spi_dma_transfer_all(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        int ret;

        /* Submit the DMA Tx transfer */
        ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
        if (ret)
                goto err_clear_dmac;

        /* Submit the DMA Rx transfer if required */
        if (xfer->rx_buf) {
                ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
                                           xfer->rx_sg.nents);
                if (ret)
                        goto err_clear_dmac;

                /* Rx must be started before Tx because SPI is full-duplex */
                dma_async_issue_pending(dws->rxchan);
        }

        dma_async_issue_pending(dws->txchan);

        ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
        dw_writel(dws, DW_SPI_DMACR, 0);

        return ret;
}

/*
 * If at least one of the requested DMA channels doesn't support hardware
 * accelerated SG list traversal, the DMA driver will most likely work around
 * that by performing IRQ-based SG list entry resubmission. That causes a
 * problem if the DMA Tx channel is recharged and re-executed before the Rx
 * DMA channel. Due to non-deterministic IRQ-handler execution latency, the
 * DMA Tx channel will start pushing data to the SPI bus before the Rx DMA
 * channel is even reinitialized with the next inbound SG list entry. By
 * doing so the DMA Tx channel will implicitly start filling the DW APB SSI
 * Rx FIFO, which will eventually overflow while the Rx DMA channel is being
 * recharged and re-executed.
 *
 * In order to solve the problem we have to feed the DMA engine with SG list
 * entries one by one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
 * and prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
 * lists may have different numbers of entries of different lengths (though
 * the total length should match), let's virtually split the SG lists into a
 * set of DMA transfers, each with a length equal to the minimum of the
 * current Tx and Rx SG-entry lengths. An ASCII sketch of the implemented
 * algorithm follows:
 *                  xfer->len
 *                |___________|
 * tx_sg list:    |___|____|__|
 * rx_sg list:    |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note: for this workaround to solve the denoted problem, the DMA engine
 * driver should properly initialize the max_sg_burst capability and set the
 * DMA device max segment size parameter to the maximum data block size the
 * DMA engine supports.
 */

static int dw_spi_dma_transfer_one(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
        unsigned int tx_len = 0, rx_len = 0;
        unsigned int base, len;
        int ret;

        sg_init_table(&tx_tmp, 1);
        sg_init_table(&rx_tmp, 1);

        for (base = 0, len = 0; base < xfer->len; base += len) {
                /* Fetch next Tx DMA data chunk */
                if (!tx_len) {
                        tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
                        sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
                        tx_len = sg_dma_len(tx_sg);
                }

                /* Fetch next Rx DMA data chunk */
                if (!rx_len) {
                        rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
                        sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
                        rx_len = sg_dma_len(rx_sg);
                }

                len = min(tx_len, rx_len);

                sg_dma_len(&tx_tmp) = len;
                sg_dma_len(&rx_tmp) = len;

                /* Submit DMA Tx transfer */
                ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
                if (ret)
                        break;

                /* Submit DMA Rx transfer */
                ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
                if (ret)
                        break;

                /* Rx must be started before Tx because SPI is full-duplex */
                dma_async_issue_pending(dws->rxchan);

                dma_async_issue_pending(dws->txchan);

                /*
                 * Here we only need to wait for the DMA transfer to be
                 * finished. Since the SPI controller is kept enabled during
                 * the procedure this loop implements, there is no risk of
                 * losing data left in the Tx/Rx FIFOs.
                 */
                ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
                if (ret)
                        break;

                reinit_completion(&dws->dma_completion);

                sg_dma_address(&tx_tmp) += len;
                sg_dma_address(&rx_tmp) += len;
                tx_len -= len;
                rx_len -= len;
        }

        dw_writel(dws, DW_SPI_DMACR, 0);

        return ret;
}

static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
        unsigned int nents;
        int ret;

        nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

        /*
         * Execute a normal DMA-based transfer (which submits the Rx and Tx
         * SG lists directly to the DMA engine at once) if the full hardware
         * accelerated SG list traversal is supported by both channels, if a
         * Tx-only SPI transfer is requested, or if the DMA engine is capable
         * of handling both SG lists on a hardware accelerated basis.
         */
        if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
                ret = dw_spi_dma_transfer_all(dws, xfer);
        else
                ret = dw_spi_dma_transfer_one(dws, xfer);
        if (ret)
                return ret;

        if (dws->master->cur_msg->status == -EINPROGRESS) {
                ret = dw_spi_dma_wait_tx_done(dws, xfer);
                if (ret)
                        return ret;
        }

        if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
                ret = dw_spi_dma_wait_rx_done(dws);

        return ret;
}

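/*
 * Used as the dma_stop callback to abort an in-flight transfer: terminate
 * whichever channels are still marked busy and clear their busy bits.
 */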
static void dw_spi_dma_stop(struct dw_spi *dws)
{
        if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
                dmaengine_terminate_sync(dws->txchan);
                clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
        }
        if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
                dmaengine_terminate_sync(dws->rxchan);
                clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
        }
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
        .dma_init       = dw_spi_dma_init_mfld,
        .dma_exit       = dw_spi_dma_exit,
        .dma_setup      = dw_spi_dma_setup,
        .can_dma        = dw_spi_can_dma,
        .dma_transfer   = dw_spi_dma_transfer,
        .dma_stop       = dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
        dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
        .dma_init       = dw_spi_dma_init_generic,
        .dma_exit       = dw_spi_dma_exit,
        .dma_setup      = dw_spi_dma_setup,
        .can_dma        = dw_spi_can_dma,
        .dma_transfer   = dw_spi_dma_transfer,
        .dma_stop       = dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
        dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);
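/*
 * Usage sketch (assumed typical glue-driver flow, not part of this file):
 * a DW SPI glue driver such as spi-dw-mmio selects the DMA ops before
 * registering the controller, e.g.
 *
 *      dw_spi_dma_setup_generic(dws);
 *      ret = dw_spi_add_host(&pdev->dev, dws);
 *
 * after which the core invokes dws->dma_ops->dma_init()/dma_setup()/...
 * around each DMA-capable transfer.
 */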