// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * Copyright (C) 2015-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver
 * (4 bit support). (Further 4 bit support from a later datasheet.)
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
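
/*
 * DMA support is optional: a platform backend provides host->dma_ops,
 * which may be left unset for PIO-only operation. These wrappers guard
 * every DMA hook accordingly.
 */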
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
				      struct mmc_data *data)
{
	if (host->dma_ops)
		host->dma_ops->start(host, data);
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (host->dma_ops)
		host->dma_ops->enable(host, enable);
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
					struct tmio_mmc_data *pdata)
{
	if (host->dma_ops) {
		host->dma_ops->request(host, pdata);
	} else {
		host->chan_tx = NULL;
		host->chan_rx = NULL;
	}
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->release(host);
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->abort(host);
}

static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->dataend(host);
}
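
/*
 * host->sdcard_irq_mask shadows the CTL_IRQ_MASK register so that bits
 * can be set and cleared without reading the register back each time.
 */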
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);
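
/*
 * Status bits in CTL_STATUS are cleared by writing 0, so writing the
 * complement of 'i' acks exactly the requested interrupts and leaves
 * every other status bit untouched.
 */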
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;

	return --host->sg_len;
}
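
/* Watchdog timeout (in ms) for a request that got no hardware interrupt */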
#define CMDREQ_TIMEOUT	5000
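
/*
 * While an SDIO card interrupt is enabled the controller must stay
 * powered, hence the runtime PM reference taken here and dropped again
 * on the disable path.
 */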
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		u16 sdio_status;

		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));

		host->sdio_irq_enabled = true;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;

		/* Clear obsolete interrupts before enabling */
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
			sdio_status |= TMIO_SDIO_SETBITS_MASK;
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	usleep_range(10000, 11000);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	usleep_range(10000, 11000);

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
	}
}

static void tmio_mmc_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	host->reset(host);

	tmio_mmc_abort_dma(host);

	if (host->hw_reset)
		host->hw_reset(host);
}
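
/*
 * Runs when delayed_reset_work fires, i.e. CMDREQ_TIMEOUT ms passed
 * without the expected interrupt: fail the stuck request and reset the
 * controller so new requests can be issued.
 */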
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq) ||
	    time_is_after_jiffies(host->last_req_ts +
				  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_hw_reset(host->mmc);

	/* Ready for new calls */
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

/*
 * These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme.
 */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
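
/*
 * Compose the command register value from the opcode, the response type
 * bits above and the transfer flags, then write the argument and fire
 * the command off via CTL_SD_CMD.
 */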
static int tmio_mmc_start_command(struct tmio_mmc_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED and
			 * SET_BLOCK_COUNT when doing multiple block transfers
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
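
/*
 * PIO transfer of one chunk to/from the data port. Hosts with a 32-bit
 * data port are handled first; tail bytes of a transfer that is not a
 * multiple of the port width go through a temporary so that only whole
 * FIFO words are accessed.
 */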
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8 *buf8;

	/*
	 * Transfer the data
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u32 data = 0;
		u32 *buf32 = (u32 *)buf;

		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
					    count >> 2);

		/* if count was a multiple of 4 */
		if (!(count & 0x3))
			return;

		buf32 += count >> 2;
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
			memcpy(buf32, &data, count);
		} else {
			memcpy(&data, buf32, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
		}

		return;
	}

	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was even */
	if (!(count & 0x1))
		return;

	/* if count was odd */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME: the driver and this function assume that the port is
	 * used little endian
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if (host->dma_on) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}
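
/*
 * The DMA backend may substitute host->bounce_buf for a scatterlist
 * entry it cannot map directly. For reads, the received data must then
 * be copied back into the original scatterlist entry on completion.
 */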
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* Needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->dma_on)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop && !host->mrq->sbc) {
		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
				stop->opcode, stop->arg);

		/* Fill in the response from the auto CMD12 */
		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
	}

	schedule_work(&host->done);
}
EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);
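
/*
 * DATAEND handling: for DMA writes the card may still signal busy, so
 * completion is deferred until the busy indication clears; for DMA reads
 * the DMA backend's dataend hook finishes the request; PIO transfers are
 * completed right here under host->lock.
 */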
static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
	    stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->dma_on && (data->flags & MMC_DATA_WRITE)) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set. In these cases,
		 * waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_dataend_dma(host);
		}
	} else if (host->dma_on && (data->flags & MMC_DATA_READ)) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_dataend_dma(host);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (!host->dma_on) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_READOP);
				tasklet_schedule(&host->dma_issue);
			}
		} else {
			if (!host->dma_on) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_WRITEOP);
				tasklet_schedule(&host->dma_issue);
			}
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				      TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
				  int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
				      TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host, status);
		return true;
	}

	return false;
}

static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
		sdio_status |= TMIO_SDIO_SETBITS_MASK;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);
}
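
/*
 * Top-level interrupt handler: card detect events are checked first,
 * then SD/MMC command and data interrupts, and finally SDIO card
 * interrupts.
 */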
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	__tmio_mmc_sdio_irq(host);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;
	host->dma_on = false;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
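
/*
 * Tuning walks every sample point ("tap") twice, issuing the tuning
 * command each time, records the good taps in a bitmap and lets the
 * platform's select_tuning() hook pick the sample point to use.
 */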
static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int i, ret = 0;

	if (!host->init_tuning || !host->select_tuning)
		/* Tuning is not supported */
		goto out;

	host->tap_num = host->init_tuning(host);
	if (!host->tap_num)
		/* Tuning is not supported */
		goto out;

	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
		dev_warn_once(&host->pdev->dev,
			      "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
		goto out;
	}

	bitmap_zero(host->taps, host->tap_num * 2);

	/* Issue CMD19 twice for each tap */
	for (i = 0; i < 2 * host->tap_num; i++) {
		if (host->prepare_tuning)
			host->prepare_tuning(host, i % host->tap_num);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (ret == 0)
			set_bit(i, host->taps);
	}

	ret = host->select_tuning(host);

out:
	if (ret < 0) {
		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
		tmio_mmc_hw_reset(mmc);
	}

	return ret;
}
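
/*
 * Issue a request. If it carries a SET_BLOCK_COUNT (sbc) command, that
 * one is sent first; the main command is then issued from
 * tmio_mmc_finish_request() once CMD23 has completed.
 */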
static void tmio_process_mrq(struct tmio_mmc_host *host,
			     struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	int ret;

	if (mrq->sbc && host->cmd != mrq->sbc) {
		cmd = mrq->sbc;
	} else {
		cmd = mrq->cmd;
		if (mrq->data) {
			ret = tmio_mmc_start_data(host, mrq->data);
			if (ret)
				goto fail;
		}
	}

	ret = tmio_mmc_start_command(host, cmd);
	if (ret)
		goto fail;

	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(CMDREQ_TIMEOUT));
	return;

fail:
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(host->mmc, mrq);
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_process_mrq(host, mrq);
}

static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* If not SET_BLOCK_COUNT, clear old data */
	if (host->cmd != mrq->sbc) {
		host->cmd = NULL;
		host->data = NULL;
		host->mrq = NULL;
	}

	cancel_delayed_work(&host->delayed_reset_work);

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	if (host->check_scc_error && host->check_scc_error(host))
		mrq->cmd->error = -EILSEQ;

	/* If SET_BLOCK_COUNT, continue with main command */
	if (host->mrq && !mrq->cmd->error) {
		tmio_process_mrq(host, mrq);
		return;
	}

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() is returning void, so, no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		usleep_range(200, 300);
	}
	/*
	 * It seems VccQ should be switched on after Vcc; this is also what
	 * the omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		usleep_range(200, 300);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}

/*
 * Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot run
 * an MMC card at full speed (20 MHz). The max clock is 24 MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12 MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts,
				jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		host->set_clock(host, 0);
		break;
	case MMC_POWER_UP:
		tmio_mmc_power_on(host, ios->vdd);
		host->set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		host->set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. Delay taken from winCE driver */
	usleep_range(140, 200);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u\n",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);

	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
		 TMIO_STAT_WRPROTECT);
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	return !!(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
		  TMIO_STAT_SIGSTATE);
}

static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static int tmio_mmc_prepare_hs400_tuning(struct mmc_host *mmc,
					 struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->prepare_hs400_tuning)
		host->prepare_hs400_tuning(host);

	return 0;
}

static void tmio_mmc_hs400_downgrade(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hs400_downgrade)
		host->hs400_downgrade(host);
}

static void tmio_mmc_hs400_complete(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hs400_complete)
		host->hs400_complete(host);
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request		= tmio_mmc_request,
	.set_ios		= tmio_mmc_set_ios,
	.get_ro			= tmio_mmc_get_ro,
	.get_cd			= tmio_mmc_get_cd,
	.enable_sdio_irq	= tmio_mmc_enable_sdio_irq,
	.multi_io_quirk		= tmio_multi_io_quirk,
	.hw_reset		= tmio_mmc_hw_reset,
	.execute_tuning		= tmio_mmc_execute_tuning,
	.prepare_hs400_tuning	= tmio_mmc_prepare_hs400_tuning,
	.hs400_downgrade	= tmio_mmc_hs400_downgrade,
	.hs400_complete		= tmio_mmc_hs400_complete,
};
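
/*
 * Resolve the supported OCR mask: prefer what the Vcc regulator reports,
 * fall back to the platform data mask, and defer probing if neither is
 * available yet.
 */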
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;
	int err;

	err = mmc_regulator_get_supply(mmc);
	if (err)
		return err;

	/* Use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * There is a possibility that the regulator has not been probed yet
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct mmc_host *mmc)
{
	const struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	/*
	 * DEPRECATED:
	 * For new platforms, please use "disable-wp" instead of
	 * "toshiba,mmc-wrprotect-disable"
	 */
	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
}

struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
					  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	struct resource *res;
	void __iomem *ctl;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctl = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ctl))
		return ERR_CAST(ctl);

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->ctl = ctl;
	host->mmc = mmc;
	host->pdev = pdev;
	host->pdata = pdata;
	host->ops = tmio_mmc_ops;
	mmc->ops = &host->ops;

	ret = mmc_of_parse(host->mmc);
	if (ret) {
		host = ERR_PTR(ret);
		goto free;
	}

	tmio_mmc_of_parse(pdev, mmc);

	platform_set_drvdata(pdev, host);

	return host;

free:
	mmc_free_host(mmc);
	return host;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
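
/*
 * Probe: validate platform data, set up capabilities and transfer
 * limits, perform the initial reset, request DMA and register the host
 * with the MMC core.
 */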
int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
	struct platform_device *pdev = _host->pdev;
	struct tmio_mmc_data *pdata = _host->pdata;
	struct mmc_host *mmc = _host->mmc;
	int ret;

	/*
	 * Check the sanity of mmc->f_min to prevent host->set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0)
		return -EINVAL;

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	_host->set_pwr = pdata->set_pwr;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		return ret;

	/*
	 * Look for a card detect GPIO, if it fails with anything
	 * else than a probe deferral, just live without it.
	 */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
	if (ret == -EPROBE_DEFER)
		return ret;

	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = pdata->max_segs ? : 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = pdata->max_blk_count ? :
		(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	/*
	 * Since swiotlb has memory size limitation, this will calculate
	 * the maximum size locally (because we don't have any APIs for it now)
	 * and check the current max_req_size. And then, this will update
	 * the max_req_size if needed as a workaround.
	 */
	if (swiotlb_max_segment()) {
		unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

		if (mmc->max_req_size > max_size)
			mmc->max_req_size = max_size;
	}
	mmc->max_seg_size = mmc->max_req_size;

	if (mmc_can_gpio_ro(mmc))
		_host->ops.get_ro = mmc_gpio_get_ro;

	if (mmc_can_gpio_cd(mmc))
		_host->ops.get_cd = mmc_gpio_get_cd;

	_host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc));

	if (!_host->reset)
		_host->reset = tmio_mmc_reset;

	/*
	 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
	 * hotplug gets disabled. It seems RuntimePM related yet we need further
	 * research. Since we are planning a PM overhaul anyway, let's enforce
	 * for now the device being active by enabling native hotplug always.
	 */
	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
		_host->native_hotplug = true;

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;

	_host->set_clock(_host, 0);
	tmio_mmc_hw_reset(mmc);

	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	if (_host->native_hotplug)
		tmio_mmc_enable_mmc_irqs(_host,
				TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret)
		goto remove_host;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	return 0;

remove_host:
	tmio_mmc_host_remove(_host);
	return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
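
/*
 * Runtime PM: the controller is fully reset and re-clocked on every
 * runtime resume, so no further state needs saving on suspend beyond
 * the cached clock rate.
 */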
#ifdef CONFIG_PM
static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
	if (!host->clk_enable)
		return -ENOTSUPP;

	return host->clk_enable(host);
}

static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
	if (host->clk_disable)
		host->clk_disable(host);
}

int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct tmio_mmc_host *host = dev_get_drvdata(dev);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		host->set_clock(host, 0);

	tmio_mmc_clk_disable(host);

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);

static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
{
	return host->tap_num && mmc_can_retune(host->mmc);
}

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct tmio_mmc_host *host = dev_get_drvdata(dev);

	tmio_mmc_clk_enable(host);
	tmio_mmc_hw_reset(host->mmc);

	if (host->clk_cache)
		host->set_clock(host, host->clk_cache);

	if (host->native_hotplug)
		tmio_mmc_enable_mmc_irqs(host,
				TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	tmio_mmc_enable_dma(host, true);

	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
		dev_warn(&host->pdev->dev, "Tuning selection failed\n");

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");