1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
4  *  Copyright (C) 2013, Imagination Technologies
5  *
6  *  JZ4740 SD/MMC controller driver
7  */
8
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/irq.h>
18 #include <linux/mmc/host.h>
19 #include <linux/mmc/slot-gpio.h>
20 #include <linux/module.h>
21 #include <linux/of_device.h>
22 #include <linux/pinctrl/consumer.h>
23 #include <linux/platform_device.h>
24 #include <linux/scatterlist.h>
25
26 #include <asm/cacheflush.h>
27
28 #include <asm/mach-jz4740/dma.h>
29
30 #define JZ_REG_MMC_STRPCL       0x00
31 #define JZ_REG_MMC_STATUS       0x04
32 #define JZ_REG_MMC_CLKRT        0x08
33 #define JZ_REG_MMC_CMDAT        0x0C
34 #define JZ_REG_MMC_RESTO        0x10
35 #define JZ_REG_MMC_RDTO         0x14
36 #define JZ_REG_MMC_BLKLEN       0x18
37 #define JZ_REG_MMC_NOB          0x1C
38 #define JZ_REG_MMC_SNOB         0x20
39 #define JZ_REG_MMC_IMASK        0x24
40 #define JZ_REG_MMC_IREG         0x28
41 #define JZ_REG_MMC_CMD          0x2C
42 #define JZ_REG_MMC_ARG          0x30
43 #define JZ_REG_MMC_RESP_FIFO    0x34
44 #define JZ_REG_MMC_RXFIFO       0x38
45 #define JZ_REG_MMC_TXFIFO       0x3C
46 #define JZ_REG_MMC_DMAC         0x44
47
48 #define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
49 #define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
50 #define JZ_MMC_STRPCL_START_READWAIT BIT(5)
51 #define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
52 #define JZ_MMC_STRPCL_RESET BIT(3)
53 #define JZ_MMC_STRPCL_START_OP BIT(2)
54 #define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
55 #define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
56 #define JZ_MMC_STRPCL_CLOCK_START BIT(1)
57
58
59 #define JZ_MMC_STATUS_IS_RESETTING BIT(15)
60 #define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
61 #define JZ_MMC_STATUS_PRG_DONE BIT(13)
62 #define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
63 #define JZ_MMC_STATUS_END_CMD_RES BIT(11)
64 #define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
65 #define JZ_MMC_STATUS_IS_READWAIT BIT(9)
66 #define JZ_MMC_STATUS_CLK_EN BIT(8)
67 #define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
68 #define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
69 #define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
70 #define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
71 #define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
72 #define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
73 #define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
74 #define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)
75
76 #define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
77 #define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))
78
79
80 #define JZ_MMC_CMDAT_IO_ABORT BIT(11)
81 #define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
82 #define JZ_MMC_CMDAT_DMA_EN BIT(8)
83 #define JZ_MMC_CMDAT_INIT BIT(7)
84 #define JZ_MMC_CMDAT_BUSY BIT(6)
85 #define JZ_MMC_CMDAT_STREAM BIT(5)
86 #define JZ_MMC_CMDAT_WRITE BIT(4)
87 #define JZ_MMC_CMDAT_DATA_EN BIT(3)
88 #define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
89 #define JZ_MMC_CMDAT_RSP_R1 1
90 #define JZ_MMC_CMDAT_RSP_R2 2
91 #define JZ_MMC_CMDAT_RSP_R3 3
92
93 #define JZ_MMC_IRQ_SDIO BIT(7)
94 #define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
95 #define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
96 #define JZ_MMC_IRQ_END_CMD_RES BIT(2)
97 #define JZ_MMC_IRQ_PRG_DONE BIT(1)
98 #define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)
99
100 #define JZ_MMC_DMAC_DMA_SEL BIT(1)
101 #define JZ_MMC_DMAC_DMA_EN BIT(0)
102
103 #define JZ_MMC_CLK_RATE 24000000
104
105 enum jz4740_mmc_version {
106         JZ_MMC_JZ4740,
107         JZ_MMC_JZ4725B,
108         JZ_MMC_JZ4780,
109 };
110
111 enum jz4740_mmc_state {
112         JZ4740_MMC_STATE_READ_RESPONSE,
113         JZ4740_MMC_STATE_TRANSFER_DATA,
114         JZ4740_MMC_STATE_SEND_STOP,
115         JZ4740_MMC_STATE_DONE,
116 };
117
118 /*
119  * The MMC core allows a new mmc_request to be prepared while another
120  * mmc_request is still in flight, via the pre_req/post_req hooks.
121  * This driver uses these hooks to map/unmap the mmc_request.
122  * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
123  * flags to keep track of the mmc_request mapping state.
124  *
125  * COOKIE_UNMAPPED: the request is not mapped.
126  * COOKIE_PREMAPPED: the request was mapped in pre_req,
127  * and should be unmapped in post_req.
128  * COOKIE_MAPPED: the request was mapped in the irq handler,
129  * and should be unmapped before mmc_request_done is called.
130  */
131 enum jz4780_cookie {
132         COOKIE_UNMAPPED = 0,
133         COOKIE_PREMAPPED,
134         COOKIE_MAPPED,
135 };
136
137 struct jz4740_mmc_host {
138         struct mmc_host *mmc;
139         struct platform_device *pdev;
140         struct clk *clk;
141
142         enum jz4740_mmc_version version;
143
144         int irq;
145         int card_detect_irq;
146
147         void __iomem *base;
148         struct resource *mem_res;
149         struct mmc_request *req;
150         struct mmc_command *cmd;
151
152         unsigned long waiting;
153
154         uint32_t cmdat;
155
156         uint32_t irq_mask;
157
158         spinlock_t lock;
159
160         struct timer_list timeout_timer;
161         struct sg_mapping_iter miter;
162         enum jz4740_mmc_state state;
163
164         /* DMA support */
165         struct dma_chan *dma_rx;
166         struct dma_chan *dma_tx;
167         bool use_dma;
168
169 /* The DMA trigger level is 8 words: a DMA read request is raised when
170  * MSC_RXFIFO holds at least 8 data words, and a DMA write request is
171  * raised when MSC_TXFIFO holds fewer than 8.
172  */
173 #define JZ4740_MMC_FIFO_HALF_SIZE 8
174 };
175
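/*
 * Register access helpers: the interrupt mask register (IMASK) is
 * accessed as a 32-bit register on the JZ4725B and later, and the
 * interrupt flag register (IREG) as a 32-bit register on the JZ4780
 * and later; older controllers use 16-bit accesses for both.
 */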
176 static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
177                                       uint32_t val)
178 {
179         if (host->version >= JZ_MMC_JZ4725B)
180                 return writel(val, host->base + JZ_REG_MMC_IMASK);
181         else
182                 return writew(val, host->base + JZ_REG_MMC_IMASK);
183 }
184
185 static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
186                                      uint32_t val)
187 {
188         if (host->version >= JZ_MMC_JZ4780)
189                 return writel(val, host->base + JZ_REG_MMC_IREG);
190         else
191                 return writew(val, host->base + JZ_REG_MMC_IREG);
192 }
193
194 static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
195 {
196         if (host->version >= JZ_MMC_JZ4780)
197                 return readl(host->base + JZ_REG_MMC_IREG);
198         else
199                 return readw(host->base + JZ_REG_MMC_IREG);
200 }
201
202 /*----------------------------------------------------------------------------*/
203 /* DMA infrastructure */
204
205 static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
206 {
207         if (!host->use_dma)
208                 return;
209
210         dma_release_channel(host->dma_tx);
211         dma_release_channel(host->dma_rx);
212 }
213
214 static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
215 {
216         host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
217         if (IS_ERR(host->dma_tx)) {
218                 dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
219                 return PTR_ERR(host->dma_tx);
220         }
221
222         host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
223         if (IS_ERR(host->dma_rx)) {
224                 dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
225                 dma_release_channel(host->dma_tx);
226                 return PTR_ERR(host->dma_rx);
227         }
228
229         return 0;
230 }
231
232 static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
233                                                        struct mmc_data *data)
234 {
235         return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
236 }
237
238 static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
239                                  struct mmc_data *data)
240 {
241         struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
242         enum dma_data_direction dir = mmc_get_dma_dir(data);
243
244         dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
245         data->host_cookie = COOKIE_UNMAPPED;
246 }
247
248 /* Prepares DMA data for the current or the next transfer.
249  * A request can be in flight when this is called.
250  */
251 static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
252                                        struct mmc_data *data,
253                                        int cookie)
254 {
255         struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
256         enum dma_data_direction dir = mmc_get_dma_dir(data);
257         int sg_count;
258
259         if (data->host_cookie == COOKIE_PREMAPPED)
260                 return data->sg_count;
261
262         sg_count = dma_map_sg(chan->device->dev,
263                         data->sg,
264                         data->sg_len,
265                         dir);
266
267         if (sg_count <= 0) {
268                 dev_err(mmc_dev(host->mmc),
269                         "Failed to map scatterlist for DMA operation\n");
270                 return -EINVAL;
271         }
272
273         data->sg_count = sg_count;
274         data->host_cookie = cookie;
275
276         return data->sg_count;
277 }
278
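/*
 * Configure the DMA channel for the data transfer (FIFO address, 32-bit
 * accesses, bursts of half the FIFO), map the scatterlist unless it was
 * premapped in pre_req, then submit and issue the slave descriptor.
 */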
279 static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
280                                          struct mmc_data *data)
281 {
282         struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
283         struct dma_async_tx_descriptor *desc;
284         struct dma_slave_config conf = {
285                 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
286                 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
287                 .src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
288                 .dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
289         };
290         int sg_count;
291
292         if (data->flags & MMC_DATA_WRITE) {
293                 conf.direction = DMA_MEM_TO_DEV;
294                 conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
295                 conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
296         } else {
297                 conf.direction = DMA_DEV_TO_MEM;
298                 conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
299                 conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
300         }
301
302         sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
303         if (sg_count < 0)
304                 return sg_count;
305
306         dmaengine_slave_config(chan, &conf);
307         desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
308                         conf.direction,
309                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
310         if (!desc) {
311                 dev_err(mmc_dev(host->mmc),
312                         "Failed to allocate DMA %s descriptor",
313                          conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
314                 goto dma_unmap;
315         }
316
317         dmaengine_submit(desc);
318         dma_async_issue_pending(chan);
319
320         return 0;
321
322 dma_unmap:
323         if (data->host_cookie == COOKIE_MAPPED)
324                 jz4740_mmc_dma_unmap(host, data);
325         return -ENOMEM;
326 }
327
328 static void jz4740_mmc_pre_request(struct mmc_host *mmc,
329                                    struct mmc_request *mrq)
330 {
331         struct jz4740_mmc_host *host = mmc_priv(mmc);
332         struct mmc_data *data = mrq->data;
333
334         if (!host->use_dma)
335                 return;
336
337         data->host_cookie = COOKIE_UNMAPPED;
338         if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
339                 data->host_cookie = COOKIE_UNMAPPED;
340 }
341
342 static void jz4740_mmc_post_request(struct mmc_host *mmc,
343                                     struct mmc_request *mrq,
344                                     int err)
345 {
346         struct jz4740_mmc_host *host = mmc_priv(mmc);
347         struct mmc_data *data = mrq->data;
348
349         if (data && data->host_cookie != COOKIE_UNMAPPED)
350                 jz4740_mmc_dma_unmap(host, data);
351
352         if (err) {
353                 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
354
355                 dmaengine_terminate_all(chan);
356         }
357 }
358
359 /*----------------------------------------------------------------------------*/
360
361 static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
362         unsigned int irq, bool enabled)
363 {
364         unsigned long flags;
365
366         spin_lock_irqsave(&host->lock, flags);
367         if (enabled)
368                 host->irq_mask &= ~irq;
369         else
370                 host->irq_mask |= irq;
371
372         jz4740_mmc_write_irq_mask(host, host->irq_mask);
373         spin_unlock_irqrestore(&host->lock, flags);
374 }
375
376 static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
377         bool start_transfer)
378 {
379         uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
380
381         if (start_transfer)
382                 val |= JZ_MMC_STRPCL_START_OP;
383
384         writew(val, host->base + JZ_REG_MMC_STRPCL);
385 }
386
387 static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
388 {
389         uint32_t status;
390         unsigned int timeout = 1000;
391
392         writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
393         do {
394                 status = readl(host->base + JZ_REG_MMC_STATUS);
395         } while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
396 }
397
398 static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
399 {
400         uint32_t status;
401         unsigned int timeout = 1000;
402
403         writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
404         udelay(10);
405         do {
406                 status = readl(host->base + JZ_REG_MMC_STATUS);
407         } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
408 }
409
410 static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
411 {
412         struct mmc_request *req;
413         struct mmc_data *data;
414
415         req = host->req;
416         data = req->data;
417         host->req = NULL;
418
419         if (data && data->host_cookie == COOKIE_MAPPED)
420                 jz4740_mmc_dma_unmap(host, data);
421         mmc_request_done(host->mmc, req);
422 }
423
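/*
 * Busy-poll for an interrupt flag. Returns false if the flag was seen
 * within the poll budget. Returns true if polling gave up; in that case
 * the interrupt is unmasked and the timeout timer is (re)armed, so the
 * transfer is resumed from the interrupt handler or failed by the
 * timeout handler.
 */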
424 static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
425         unsigned int irq)
426 {
427         unsigned int timeout = 0x800;
428         uint32_t status;
429
430         do {
431                 status = jz4740_mmc_read_irq_reg(host);
432         } while (!(status & irq) && --timeout);
433
434         if (timeout == 0) {
435                 set_bit(0, &host->waiting);
436                 mod_timer(&host->timeout_timer, jiffies + 5*HZ);
437                 jz4740_mmc_set_irq_enabled(host, irq, true);
438                 return true;
439         }
440
441         return false;
442 }
443
444 static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
445         struct mmc_data *data)
446 {
447         int status;
448
449         status = readl(host->base + JZ_REG_MMC_STATUS);
450         if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
451                 if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
452                         host->req->cmd->error = -ETIMEDOUT;
453                         data->error = -ETIMEDOUT;
454                 } else {
455                         host->req->cmd->error = -EIO;
456                         data->error = -EIO;
457                 }
458         } else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
459                 if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
460                         host->req->cmd->error = -ETIMEDOUT;
461                         data->error = -ETIMEDOUT;
462                 } else {
463                         host->req->cmd->error = -EIO;
464                         data->error = -EIO;
465                 }
466         }
467 }
468
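/*
 * PIO transfer paths. jz4740_mmc_write_data() and jz4740_mmc_read_data()
 * below service the data FIFO in bursts of 8 words, waiting for the
 * TXFIFO_WR_REQ/RXFIFO_RD_REQ flag before each burst. They return true
 * if a poll timed out, so the state machine can stop and be resumed from
 * the interrupt handler, and false once the scatterlist has been fully
 * transferred.
 */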
469 static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
470         struct mmc_data *data)
471 {
472         struct sg_mapping_iter *miter = &host->miter;
473         void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
474         uint32_t *buf;
475         bool timeout;
476         size_t i, j;
477
478         while (sg_miter_next(miter)) {
479                 buf = miter->addr;
480                 i = miter->length / 4;
481                 j = i / 8;
482                 i = i & 0x7;
483                 while (j) {
484                         timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
485                         if (unlikely(timeout))
486                                 goto poll_timeout;
487
488                         writel(buf[0], fifo_addr);
489                         writel(buf[1], fifo_addr);
490                         writel(buf[2], fifo_addr);
491                         writel(buf[3], fifo_addr);
492                         writel(buf[4], fifo_addr);
493                         writel(buf[5], fifo_addr);
494                         writel(buf[6], fifo_addr);
495                         writel(buf[7], fifo_addr);
496                         buf += 8;
497                         --j;
498                 }
499                 if (unlikely(i)) {
500                         timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
501                         if (unlikely(timeout))
502                                 goto poll_timeout;
503
504                         while (i) {
505                                 writel(*buf, fifo_addr);
506                                 ++buf;
507                                 --i;
508                         }
509                 }
510                 data->bytes_xfered += miter->length;
511         }
512         sg_miter_stop(miter);
513
514         return false;
515
516 poll_timeout:
517         miter->consumed = (void *)buf - miter->addr;
518         data->bytes_xfered += miter->consumed;
519         sg_miter_stop(miter);
520
521         return true;
522 }
523
524 static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
525                                 struct mmc_data *data)
526 {
527         struct sg_mapping_iter *miter = &host->miter;
528         void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
529         uint32_t *buf;
530         uint32_t d;
531         uint32_t status;
532         size_t i, j;
533         unsigned int timeout;
534
535         while (sg_miter_next(miter)) {
536                 buf = miter->addr;
537                 i = miter->length;
538                 j = i / 32;
539                 i = i & 0x1f;
540                 while (j) {
541                         timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
542                         if (unlikely(timeout))
543                                 goto poll_timeout;
544
545                         buf[0] = readl(fifo_addr);
546                         buf[1] = readl(fifo_addr);
547                         buf[2] = readl(fifo_addr);
548                         buf[3] = readl(fifo_addr);
549                         buf[4] = readl(fifo_addr);
550                         buf[5] = readl(fifo_addr);
551                         buf[6] = readl(fifo_addr);
552                         buf[7] = readl(fifo_addr);
553
554                         buf += 8;
555                         --j;
556                 }
557
558                 if (unlikely(i)) {
559                         timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
560                         if (unlikely(timeout))
561                                 goto poll_timeout;
562
563                         while (i >= 4) {
564                                 *buf++ = readl(fifo_addr);
565                                 i -= 4;
566                         }
567                         if (unlikely(i > 0)) {
568                                 d = readl(fifo_addr);
569                                 memcpy(buf, &d, i);
570                         }
571                 }
572                 data->bytes_xfered += miter->length;
573
574                 /* This can go away once MIPS implements
575                  * flush_kernel_dcache_page */
576                 flush_dcache_page(miter->page);
577         }
578         sg_miter_stop(miter);
579
580         /* For whatever reason there is sometimes one word more in the FIFO
581          * than requested. */
582         timeout = 1000;
583         status = readl(host->base + JZ_REG_MMC_STATUS);
584         while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
585                 d = readl(fifo_addr);
586                 status = readl(host->base + JZ_REG_MMC_STATUS);
587         }
588
589         return false;
590
591 poll_timeout:
592         miter->consumed = (void *)buf - miter->addr;
593         data->bytes_xfered += miter->consumed;
594         sg_miter_stop(miter);
595
596         return true;
597 }
598
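/*
 * Software timeout handler. The timer is armed for 5 seconds whenever a
 * command is issued or a FIFO poll gives up. If it fires while the
 * driver is still waiting for an interrupt, the command is failed with
 * -ETIMEDOUT and the request is completed.
 */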
599 static void jz4740_mmc_timeout(struct timer_list *t)
600 {
601         struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
602
603         if (!test_and_clear_bit(0, &host->waiting))
604                 return;
605
606         jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
607
608         host->req->cmd->error = -ETIMEDOUT;
609         jz4740_mmc_request_done(host);
610 }
611
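/*
 * The response FIFO is read 16 bits at a time. Long (136-bit) responses
 * are assembled into resp[0..3], short responses into resp[0].
 */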
612 static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
613         struct mmc_command *cmd)
614 {
615         int i;
616         uint16_t tmp;
617         void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;
618
619         if (cmd->flags & MMC_RSP_136) {
620                 tmp = readw(fifo_addr);
621                 for (i = 0; i < 4; ++i) {
622                         cmd->resp[i] = tmp << 24;
623                         tmp = readw(fifo_addr);
624                         cmd->resp[i] |= tmp << 8;
625                         tmp = readw(fifo_addr);
626                         cmd->resp[i] |= tmp >> 8;
627                 }
628         } else {
629                 cmd->resp[0] = readw(fifo_addr) << 24;
630                 cmd->resp[0] |= readw(fifo_addr) << 8;
631                 cmd->resp[0] |= readw(fifo_addr) & 0xff;
632         }
633 }
634
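/*
 * Issue a command: the card clock is stopped while CMD/ARG/CMDAT (and,
 * for data commands, BLKLEN/NOB and the DMA configuration) are
 * programmed, then restarted with the START_OP bit to kick off the
 * operation.
 */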
635 static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
636         struct mmc_command *cmd)
637 {
638         uint32_t cmdat = host->cmdat;
639
640         host->cmdat &= ~JZ_MMC_CMDAT_INIT;
641         jz4740_mmc_clock_disable(host);
642
643         host->cmd = cmd;
644
645         if (cmd->flags & MMC_RSP_BUSY)
646                 cmdat |= JZ_MMC_CMDAT_BUSY;
647
648         switch (mmc_resp_type(cmd)) {
649         case MMC_RSP_R1B:
650         case MMC_RSP_R1:
651                 cmdat |= JZ_MMC_CMDAT_RSP_R1;
652                 break;
653         case MMC_RSP_R2:
654                 cmdat |= JZ_MMC_CMDAT_RSP_R2;
655                 break;
656         case MMC_RSP_R3:
657                 cmdat |= JZ_MMC_CMDAT_RSP_R3;
658                 break;
659         default:
660                 break;
661         }
662
663         if (cmd->data) {
664                 cmdat |= JZ_MMC_CMDAT_DATA_EN;
665                 if (cmd->data->flags & MMC_DATA_WRITE)
666                         cmdat |= JZ_MMC_CMDAT_WRITE;
667                 if (host->use_dma) {
668                         /*
669                          * The 4780's MMC controller has integrated DMA ability
670                          * in addition to being able to use the external DMA
671                          * controller. It moves DMA control bits to a separate
672                          * register. The DMA_SEL bit chooses the external
673                          * controller over the integrated one. Earlier SoCs
674                          * can only use the external controller, and have a
675                          * single DMA enable bit in CMDAT.
676                          */
677                         if (host->version >= JZ_MMC_JZ4780) {
678                                 writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
679                                        host->base + JZ_REG_MMC_DMAC);
680                         } else {
681                                 cmdat |= JZ_MMC_CMDAT_DMA_EN;
682                         }
683                 } else if (host->version >= JZ_MMC_JZ4780) {
684                         writel(0, host->base + JZ_REG_MMC_DMAC);
685                 }
686
687                 writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
688                 writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
689         }
690
691         writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
692         writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
693         writel(cmdat, host->base + JZ_REG_MMC_CMDAT);
694
695         jz4740_mmc_clock_enable(host, 1);
696 }
697
698 static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
699 {
700         struct mmc_command *cmd = host->req->cmd;
701         struct mmc_data *data = cmd->data;
702         int direction;
703
704         if (data->flags & MMC_DATA_READ)
705                 direction = SG_MITER_TO_SG;
706         else
707                 direction = SG_MITER_FROM_SG;
708
709         sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
710 }
711
712
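/*
 * Threaded half of the interrupt handler. Drives the request state
 * machine; each state may return early with a pending poll (leaving
 * host->state set) so that the transfer continues when the next
 * interrupt arrives, and the request is completed once no poll is
 * outstanding.
 */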
713 static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
714 {
715         struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
716         struct mmc_command *cmd = host->req->cmd;
717         struct mmc_request *req = host->req;
718         struct mmc_data *data = cmd->data;
719         bool timeout = false;
720
721         if (cmd->error)
722                 host->state = JZ4740_MMC_STATE_DONE;
723
724         switch (host->state) {
725         case JZ4740_MMC_STATE_READ_RESPONSE:
726                 if (cmd->flags & MMC_RSP_PRESENT)
727                         jz4740_mmc_read_response(host, cmd);
728
729                 if (!data)
730                         break;
731
732                 jz_mmc_prepare_data_transfer(host);
733                 /* fall through */
734
735         case JZ4740_MMC_STATE_TRANSFER_DATA:
736                 if (host->use_dma) {
737                         /* Use DMA if enabled.
738                          * Data transfer direction is defined later by
739                          * relying on data flags in
740                          * jz4740_mmc_prepare_dma_data() and
741                          * jz4740_mmc_start_dma_transfer().
742                          */
743                         timeout = jz4740_mmc_start_dma_transfer(host, data);
744                         data->bytes_xfered = data->blocks * data->blksz;
745                 } else if (data->flags & MMC_DATA_READ)
746                         /* Use PIO if DMA is not enabled.
747                          * Data transfer direction was defined before
748                          * by relying on data flags in
749                          * jz_mmc_prepare_data_transfer().
750                          */
751                         timeout = jz4740_mmc_read_data(host, data);
752                 else
753                         timeout = jz4740_mmc_write_data(host, data);
754
755                 if (unlikely(timeout)) {
756                         host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
757                         break;
758                 }
759
760                 jz4740_mmc_transfer_check_state(host, data);
761
762                 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
763                 if (unlikely(timeout)) {
764                         host->state = JZ4740_MMC_STATE_SEND_STOP;
765                         break;
766                 }
767                 jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
768                 /* fall through */
769
770         case JZ4740_MMC_STATE_SEND_STOP:
771                 if (!req->stop)
772                         break;
773
774                 jz4740_mmc_send_command(host, req->stop);
775
776                 if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
777                         timeout = jz4740_mmc_poll_irq(host,
778                                                       JZ_MMC_IRQ_PRG_DONE);
779                         if (timeout) {
780                                 host->state = JZ4740_MMC_STATE_DONE;
781                                 break;
782                         }
783                 }
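                /* fall through */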
784         case JZ4740_MMC_STATE_DONE:
785                 break;
786         }
787
788         if (!timeout)
789                 jz4740_mmc_request_done(host);
790
791         return IRQ_HANDLED;
792 }
793
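/*
 * Hard interrupt handler. Acknowledges stale interrupt flags, signals
 * SDIO card interrupts directly, and when a command is waiting for
 * completion latches any error reported in the status register, masks
 * the handled interrupts and wakes the threaded handler.
 */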
794 static irqreturn_t jz_mmc_irq(int irq, void *devid)
795 {
796         struct jz4740_mmc_host *host = devid;
797         struct mmc_command *cmd = host->cmd;
798         uint32_t irq_reg, status, tmp;
799
800         status = readl(host->base + JZ_REG_MMC_STATUS);
801         irq_reg = jz4740_mmc_read_irq_reg(host);
802
803         tmp = irq_reg;
804         irq_reg &= ~host->irq_mask;
805
806         tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
807                 JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);
808
809         if (tmp != irq_reg)
810                 jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);
811
812         if (irq_reg & JZ_MMC_IRQ_SDIO) {
813                 jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
814                 mmc_signal_sdio_irq(host->mmc);
815                 irq_reg &= ~JZ_MMC_IRQ_SDIO;
816         }
817
818         if (host->req && cmd && irq_reg) {
819                 if (test_and_clear_bit(0, &host->waiting)) {
820                         del_timer(&host->timeout_timer);
821
822                         if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
823                                 cmd->error = -ETIMEDOUT;
824                         } else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
825                                 cmd->error = -EIO;
826                         } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
827                                              JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
828                                 if (cmd->data)
829                                         cmd->data->error = -EIO;
830                                 cmd->error = -EIO;
831                         }
832
833                         jz4740_mmc_set_irq_enabled(host, irq_reg, false);
834                         jz4740_mmc_write_irq_reg(host, irq_reg);
835
836                         return IRQ_WAKE_THREAD;
837                 }
838         }
839
840         return IRQ_HANDLED;
841 }
842
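/*
 * Program the card clock divider. The controller clock is set to f_max
 * and divided by a power of two (CLKRT divider 0-7) until it no longer
 * exceeds the requested rate or the maximum divider is reached; the
 * resulting rate is returned.
 */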
843 static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
844 {
845         int div = 0;
846         int real_rate;
847
848         jz4740_mmc_clock_disable(host);
849         clk_set_rate(host->clk, host->mmc->f_max);
850
851         real_rate = clk_get_rate(host->clk);
852
853         while (real_rate > rate && div < 7) {
854                 ++div;
855                 real_rate >>= 1;
856         }
857
858         writew(div, host->base + JZ_REG_MMC_CLKRT);
859         return real_rate;
860 }
861
862 static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
863 {
864         struct jz4740_mmc_host *host = mmc_priv(mmc);
865
866         host->req = req;
867
868         jz4740_mmc_write_irq_reg(host, ~0);
869         jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);
870
871         host->state = JZ4740_MMC_STATE_READ_RESPONSE;
872         set_bit(0, &host->waiting);
873         mod_timer(&host->timeout_timer, jiffies + 5*HZ);
874         jz4740_mmc_send_command(host, req->cmd);
875 }
876
877 static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
878 {
879         struct jz4740_mmc_host *host = mmc_priv(mmc);
880         if (ios->clock)
881                 jz4740_mmc_set_clock_rate(host, ios->clock);
882
883         switch (ios->power_mode) {
884         case MMC_POWER_UP:
885                 jz4740_mmc_reset(host);
886                 if (!IS_ERR(mmc->supply.vmmc))
887                         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
888                 host->cmdat |= JZ_MMC_CMDAT_INIT;
889                 clk_prepare_enable(host->clk);
890                 break;
891         case MMC_POWER_ON:
892                 break;
893         default:
894                 if (!IS_ERR(mmc->supply.vmmc))
895                         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
896                 clk_disable_unprepare(host->clk);
897                 break;
898         }
899
900         switch (ios->bus_width) {
901         case MMC_BUS_WIDTH_1:
902                 host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
903                 break;
904         case MMC_BUS_WIDTH_4:
905                 host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
906                 break;
907         default:
908                 break;
909         }
910 }
911
912 static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
913 {
914         struct jz4740_mmc_host *host = mmc_priv(mmc);
915         jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
916 }
917
918 static const struct mmc_host_ops jz4740_mmc_ops = {
919         .request        = jz4740_mmc_request,
920         .pre_req        = jz4740_mmc_pre_request,
921         .post_req       = jz4740_mmc_post_request,
922         .set_ios        = jz4740_mmc_set_ios,
923         .get_ro         = mmc_gpio_get_ro,
924         .get_cd         = mmc_gpio_get_cd,
925         .enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
926 };
927
928 static const struct of_device_id jz4740_mmc_of_match[] = {
929         { .compatible = "ingenic,jz4740-mmc", .data = (void *)JZ_MMC_JZ4740 },
930         { .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
931         { .compatible = "ingenic,jz4780-mmc", .data = (void *)JZ_MMC_JZ4780 },
932         {},
933 };
934 MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
935
936 static int jz4740_mmc_probe(struct platform_device *pdev)
937 {
938         int ret;
939         struct mmc_host *mmc;
940         struct jz4740_mmc_host *host;
941         const struct of_device_id *match;
942
943         mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
944         if (!mmc) {
945                 dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
946                 return -ENOMEM;
947         }
948
949         host = mmc_priv(mmc);
950
951         match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
952         if (match) {
953                 host->version = (enum jz4740_mmc_version)match->data;
954         } else {
955                 /* JZ4740 should be the only one using legacy probe */
956                 host->version = JZ_MMC_JZ4740;
957         }
958
959         ret = mmc_of_parse(mmc);
960         if (ret) {
961                 if (ret != -EPROBE_DEFER)
962                         dev_err(&pdev->dev,
963                                 "could not parse device properties: %d\n", ret);
964                 goto err_free_host;
965         }
966
967         mmc_regulator_get_supply(mmc);
968
969         host->irq = platform_get_irq(pdev, 0);
970         if (host->irq < 0) {
971                 ret = host->irq;
972                 dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
973                 goto err_free_host;
974         }
975
976         host->clk = devm_clk_get(&pdev->dev, "mmc");
977         if (IS_ERR(host->clk)) {
978                 ret = PTR_ERR(host->clk);
979                 dev_err(&pdev->dev, "Failed to get mmc clock\n");
980                 goto err_free_host;
981         }
982
983         host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
984         host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
985         if (IS_ERR(host->base)) {
986                 ret = PTR_ERR(host->base);
987                 dev_err(&pdev->dev, "Failed to ioremap base memory\n");
988                 goto err_free_host;
989         }
990
991         mmc->ops = &jz4740_mmc_ops;
992         if (!mmc->f_max)
993                 mmc->f_max = JZ_MMC_CLK_RATE;
994         mmc->f_min = mmc->f_max / 128;
995         mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
996
997         mmc->max_blk_size = (1 << 10) - 1;
998         mmc->max_blk_count = (1 << 15) - 1;
999         mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1000
1001         mmc->max_segs = 128;
1002         mmc->max_seg_size = mmc->max_req_size;
1003
1004         host->mmc = mmc;
1005         host->pdev = pdev;
1006         spin_lock_init(&host->lock);
1007         host->irq_mask = ~0;
1008
1009         jz4740_mmc_reset(host);
1010
1011         ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
1012                         dev_name(&pdev->dev), host);
1013         if (ret) {
1014                 dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
1015                 goto err_free_host;
1016         }
1017
1018         jz4740_mmc_clock_disable(host);
1019         timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
1020
1021         ret = jz4740_mmc_acquire_dma_channels(host);
1022         if (ret == -EPROBE_DEFER)
1023                 goto err_free_irq;
1024         host->use_dma = !ret;
1025
1026         platform_set_drvdata(pdev, host);
1027         ret = mmc_add_host(mmc);
1028
1029         if (ret) {
1030                 dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
1031                 goto err_release_dma;
1032         }
1033         dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
1034
1035         dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
1036                  host->use_dma ? "DMA" : "PIO",
1037                  (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
1038
1039         return 0;
1040
1041 err_release_dma:
1042         if (host->use_dma)
1043                 jz4740_mmc_release_dma_channels(host);
1044 err_free_irq:
1045         free_irq(host->irq, host);
1046 err_free_host:
1047         mmc_free_host(mmc);
1048
1049         return ret;
1050 }
1051
1052 static int jz4740_mmc_remove(struct platform_device *pdev)
1053 {
1054         struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
1055
1056         del_timer_sync(&host->timeout_timer);
1057         jz4740_mmc_set_irq_enabled(host, 0xff, false);
1058         jz4740_mmc_reset(host);
1059
1060         mmc_remove_host(host->mmc);
1061
1062         free_irq(host->irq, host);
1063
1064         if (host->use_dma)
1065                 jz4740_mmc_release_dma_channels(host);
1066
1067         mmc_free_host(host->mmc);
1068
1069         return 0;
1070 }
1071
1072 #ifdef CONFIG_PM_SLEEP
1073
1074 static int jz4740_mmc_suspend(struct device *dev)
1075 {
1076         return pinctrl_pm_select_sleep_state(dev);
1077 }
1078
1079 static int jz4740_mmc_resume(struct device *dev)
1080 {
1081         return pinctrl_pm_select_default_state(dev);
1082 }
1083
1084 static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
1085         jz4740_mmc_resume);
1086 #define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
1087 #else
1088 #define JZ4740_MMC_PM_OPS NULL
1089 #endif
1090
1091 static struct platform_driver jz4740_mmc_driver = {
1092         .probe = jz4740_mmc_probe,
1093         .remove = jz4740_mmc_remove,
1094         .driver = {
1095                 .name = "jz4740-mmc",
1096                 .of_match_table = of_match_ptr(jz4740_mmc_of_match),
1097                 .pm = JZ4740_MMC_PM_OPS,
1098         },
1099 };
1100
1101 module_platform_driver(jz4740_mmc_driver);
1102
1103 MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
1104 MODULE_LICENSE("GPL");
1105 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");