// SPDX-License-Identifier: GPL-2.0
/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 */
/*
 * The MMCIF driver processes MMC requests asynchronously, as required by
 * the Linux MMC API.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing, each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In the case of the command stage this immediately
 * returns control to the caller, leaving all further processing to run
 * asynchronously. All further request processing is performed by the bottom
 * halves.
 *
 * The bottom half consists of a "hard" IRQ handler, an IRQ handler thread,
 * a DMA completion callback (if DMA is used), a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing
 * is finished. In case processing has to continue, i.e. if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine into
 * the bottom half waiting state.
 */
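
/*
 * Illustrative control flow for an error-free single-block read; this is a
 * rough sketch only, using the handlers defined later in this file:
 *
 *   sh_mmcif_request()           top half: start command, arm timeout work
 *     -> sh_mmcif_intr()         hard IRQ: ack/mask events, wake the thread
 *       -> sh_mmcif_irqt()       IRQ thread: sh_mmcif_end_cmd()
 *         -> sh_mmcif_single_read()   next top half: unmask buffer-read IRQ
 *           -> sh_mmcif_irqt()   IRQ thread: sh_mmcif_read_block()
 *             -> ... -> mmc_request_done()
 */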
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_data/sh_mmcif.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#define DRIVER_NAME "sh_mmcif"
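
/* CE_CMD_SET */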
#define CMD_MASK 0x3f000000
#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY (1 << 21) /* R1b */
#define CMD_SET_CCSEN (1 << 20)
#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH (1 << 5)
#define CMD_SET_DARS (1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
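
/* CE_CMD_CTRL */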
#define CMD_CTRL_BREAK (1 << 0)
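
/* CE_BLOCK_SET */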
#define BLOCK_SIZE_MASK 0x0000ffff
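
/* CE_INT */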
#define INT_CCSDE (1 << 29)
#define INT_CMD12DRE (1 << 26)
#define INT_CMD12RBE (1 << 25)
#define INT_CMD12CRE (1 << 24)
#define INT_DTRANE (1 << 23)
#define INT_BUFRE (1 << 22)
#define INT_BUFWEN (1 << 21)
#define INT_BUFREN (1 << 20)
#define INT_CCSRCV (1 << 19)
#define INT_RBSYE (1 << 17)
#define INT_CRSPE (1 << 16)
#define INT_CMDVIO (1 << 15)
#define INT_BUFVIO (1 << 14)
#define INT_WDATERR (1 << 11)
#define INT_RDATERR (1 << 10)
#define INT_RIDXERR (1 << 9)
#define INT_RSPERR (1 << 8)
#define INT_CCSTO (1 << 5)
#define INT_CRCSTO (1 << 4)
#define INT_WDATTO (1 << 3)
#define INT_RDATTO (1 << 2)
#define INT_RBSYTO (1 << 1)
#define INT_RSPTO (1 << 0)
#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
		     INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
		     INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
		     INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \
		 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
		 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS (INT_CCSTO | INT_CCSRCV | INT_CCSDE)
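
/* CE_INT_MASK */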
#define MASK_ALL 0x00000000
#define MASK_MCCSDE (1 << 29)
#define MASK_MCMD12DRE (1 << 26)
#define MASK_MCMD12RBE (1 << 25)
#define MASK_MCMD12CRE (1 << 24)
#define MASK_MDTRANE (1 << 23)
#define MASK_MBUFRE (1 << 22)
#define MASK_MBUFWEN (1 << 21)
#define MASK_MBUFREN (1 << 20)
#define MASK_MCCSRCV (1 << 19)
#define MASK_MRBSYE (1 << 17)
#define MASK_MCRSPE (1 << 16)
#define MASK_MCMDVIO (1 << 15)
#define MASK_MBUFVIO (1 << 14)
#define MASK_MWDATERR (1 << 11)
#define MASK_MRDATERR (1 << 10)
#define MASK_MRIDXERR (1 << 9)
#define MASK_MRSPERR (1 << 8)
#define MASK_MCCSTO (1 << 5)
#define MASK_MCRCSTO (1 << 4)
#define MASK_MWDATTO (1 << 3)
#define MASK_MRDATTO (1 << 2)
#define MASK_MRBSYTO (1 << 1)
#define MASK_MRSPTO (1 << 0)

#define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
			MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
			MASK_MCRCSTO | MASK_MWDATTO | \
			MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
		    MASK_MBUFREN | MASK_MBUFWEN | \
		    MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \
		    MASK_MCMD12RBE | MASK_MCMD12CRE)
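
/* CE_HOST_STS1 */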
#define STS1_CMDSEQ (1 << 31)
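
/* CE_HOST_STS2 */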
#define STS2_CRCSTE (1 << 31)
#define STS2_CRC16E (1 << 30)
#define STS2_AC12CRCE (1 << 29)
#define STS2_RSPCRC7E (1 << 28)
#define STS2_CRCSTEBE (1 << 27)
#define STS2_RDATEBE (1 << 26)
#define STS2_AC12REBE (1 << 25)
#define STS2_RSPEBE (1 << 24)
#define STS2_AC12IDXE (1 << 23)
#define STS2_RSPIDXE (1 << 22)
#define STS2_CCSTO (1 << 15)
#define STS2_RDATTO (1 << 14)
#define STS2_DATBSYTO (1 << 13)
#define STS2_CRCSTTO (1 << 12)
#define STS2_AC12BSYTO (1 << 11)
#define STS2_RSPBSYTO (1 << 10)
#define STS2_AC12RSPTO (1 << 9)
#define STS2_RSPTO (1 << 8)
#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
		      STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
			  STS2_DATBSYTO | STS2_CRCSTTO | \
			  STS2_AC12BSYTO | STS2_RSPBSYTO | \
			  STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA 52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA 20000000 /* 20 MHz */
#define CLKDEV_INIT 400000 /* 400 kHz */
enum sh_mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

enum sh_mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};
/*
 * difference for each SoC
 */
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum sh_mmcif_state state;
	enum sh_mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;
	u32 clkdiv_map;			/* see CE_CLK_CTRL::CLKDIV */

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct completion dma_complete;
	bool dma_active;
};
static const struct of_device_id sh_mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);
#define sh_mmcif_host_to_dev(host) (&host->pd->dev)
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}
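
/*
 * The two helpers above set/clear bits in a 32-bit MMCIF register with a
 * non-atomic read-modify-write sequence; they are used throughout this
 * driver for all partial register updates.
 */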
static void sh_mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(dev)))
		return;

	complete(&host->dma_complete);
}
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	if (slave_id <= 0)
		return NULL;

	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}
static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}
static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
		struct sh_mmcif_plat_data *pdata = dev->platform_data;

		host->chan_tx = sh_mmcif_request_dma_pdata(host,
							   pdata->slave_id_tx);
		host->chan_rx = sh_mmcif_request_dma_pdata(host,
							   pdata->slave_id_rx);
	} else {
		host->chan_tx = dma_request_chan(dev, "tx");
		if (IS_ERR(host->chan_tx))
			host->chan_tx = NULL;
		host->chan_rx = dma_request_chan(dev, "rx");
		if (IS_ERR(host->chan_rx))
			host->chan_rx = NULL;
	}
	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
		host->chan_rx);

	if (!host->chan_tx || !host->chan_rx ||
	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
		goto error;

	return;

error:
	if (host->chan_tx)
		dma_release_channel(host->chan_tx);
	if (host->chan_rx)
		dma_release_channel(host->chan_rx);
	host->chan_tx = host->chan_rx = NULL;
}
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;
	unsigned int current_clk = clk_get_rate(host->clk);
	unsigned int clkdiv;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (host->clkdiv_map) {
		unsigned int freq, best_freq, myclk, div, diff_min, diff;
		int i;

		clkdiv = 0;
		diff_min = ~0;
		best_freq = 0;
		for (i = 31; i >= 0; i--) {
			if (!((1 << i) & host->clkdiv_map))
				continue;

			/*
			 * clk = parent_freq / div
			 * -> parent_freq = clk x div
			 */

			div = 1 << (i + 1);
			freq = clk_round_rate(host->clk, clk * div);
			myclk = freq / div;
			diff = (myclk > clk) ? myclk - clk : clk - myclk;

			if (diff <= diff_min) {
				best_freq = freq;
				clkdiv = i;
				diff_min = diff;
			}
		}

		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
			(best_freq >> (clkdiv + 1)), clk, best_freq, clkdiv);

		clk_set_rate(host->clk, best_freq);
		clkdiv = clkdiv << 16;
	} else if (sup_pclk && clk == current_clk) {
		clkdiv = CLK_SUP_PCLK;
	} else {
		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
	}

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
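
/*
 * Worked example for the divider selection above (hypothetical 104 MHz
 * parent clock): a request for a 26 MHz MMC clock needs div = 4, encoded
 * as CLKDIV = 1 because the hardware divides by 2^(CLKDIV + 1). The
 * non-clkdiv_map branch computes the same encoding:
 * fls(DIV_ROUND_UP(104000000, 26000000) - 1) - 1 = fls(3) - 1 = 1.
 */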
static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}
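
/*
 * Advance the PIO cursor to the next block within the current scatterlist
 * segment, or on to the next segment once the current one is exhausted.
 * Returns true while more segments remain to be transferred.
 */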
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}
static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}
static void sh_mmcif_single_write(struct sh_mmcif_host *host,
				  struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}
static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}
static void sh_mmcif_get_response(struct sh_mmcif_host *host,
				  struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	}
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
				       struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R1B:
		tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(dev, "Unsupported response type.\n");
		break;
	}

	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * MMC core will only set this timing if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, will have to set it
			 * in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}
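
/*
 * Informative example: for CMD18 (MMC_READ_MULTIPLE_BLOCK) with an R1
 * response on an 8-bit bus, sh_mmcif_set_cmd() returns
 * (18 << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT | CMD_SET_DATW_8 |
 * CMD_SET_CMLTE | CMD_SET_CMD12EN; no DWEN is set since it is a read.
 */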
static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	int ret = 0;

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		break;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		break;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		break;
	default:
		dev_err(dev, "Unsupported CMD%d\n", opc);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc;
	u32 mask;
	unsigned long flags;

	if (cmd->flags & MMC_RSP_BUSY)
		mask = MASK_START_CMD | MASK_MRBSYE;
	else
		mask = MASK_START_CMD | MASK_MCRSPE;

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
}
static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}
static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	if (host->mmc->f_max) {
		unsigned int f_max, f_min = 0, f_min_old;

		f_max = host->mmc->f_max;
		for (f_min_old = f_max; f_min_old > 2;) {
			f_min = clk_round_rate(host->clk, f_min_old / 2);
			if (f_min == f_min_old)
				break;
			f_min_old = f_min;
		}

		/*
		 * This driver assumes this SoC is R-Car Gen2 or later
		 */
		host->clkdiv_map = 0x3ff;

		host->mmc->f_max = f_max >> ffs(host->clkdiv_map);
		host->mmc->f_min = f_min >> fls(host->clkdiv_map);
	} else {
		unsigned int clk = clk_get_rate(host->clk);

		host->mmc->f_max = clk / 2;
		host->mmc->f_min = clk / 512;
	}

	dev_dbg(dev, "clk max/min = %d/%d\n",
		host->mmc->f_max, host->mmc->f_min);
}
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		if (!host->power) {
			clk_prepare_enable(host->clk);
			pm_runtime_get_sync(dev);
			sh_mmcif_sync_reset(host);
			sh_mmcif_request_dma(host);
			host->power = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (host->power) {
			sh_mmcif_clock_control(host, 0);
			sh_mmcif_release_dma(host);
			pm_runtime_put(dev);
			clk_disable_unprepare(host->clk);
			host->power = false;
		}
		break;
	case MMC_POWER_ON:
		sh_mmcif_clock_control(host, ios->clock);
		break;
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}
static const struct mmc_host_ops sh_mmcif_ops = {
	.request = sh_mmcif_request,
	.set_ios = sh_mmcif_set_ios,
	.get_cd = mmc_gpio_get_cd,
};
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	struct device *dev = sh_mmcif_host_to_dev(host);
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from the DMA callback and from an error
	 * IRQ, so it has to be reset here, before setting .dma_active.
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_sync(host->chan_rx);
		else
			dmaengine_terminate_sync(host->chan_tx);
	}

	return false;
}
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	bool wait = false;
	unsigned long flags;
	int wait_work;

	spin_lock_irqsave(&host->lock, flags);
	wait_work = host->wait_for;
	spin_unlock_irqrestore(&host->lock, flags);

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true if processing continues, and false if the
	 * request has to be completed, successfully or not.
	 */
	switch (wait_work) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			sh_mmcif_dma_complete(host);
	} else {
		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}
static void sh_mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}
static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}
static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct device *dev = &pdev->dev;
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq_optional(pdev, 1);
	if (irq[0] < 0)
		return -ENXIO;

	reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = msecs_to_jiffies(10000);
	host->ccs_enable = true;
	host->clk_ctrl2_enable = false;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
	mmc->max_busy_timeout = 10000;

	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(dev, "cannot get clock: %d\n", ret);
		goto err_host;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret < 0)
		goto err_host;

	sh_mmcif_clk_setup(host);

	pm_runtime_enable(dev);
	host->power = false;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(dev, 100);

	dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->clk) / 1000000UL);

	pm_runtime_put(dev);
	clk_disable_unprepare(host->clk);
	return 0;

err_clk:
	clk_disable_unprepare(host->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}
static int sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->clk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	pm_runtime_get_sync(dev);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
	pm_runtime_put(dev);

	return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};
static struct platform_driver sh_mmcif_driver = {
	.probe = sh_mmcif_probe,
	.remove = sh_mmcif_remove,
	.driver = {
		.name = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = &sh_mmcif_dev_pm_ops,
		.of_match_table = sh_mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);
MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");