/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

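/*
 * Illustrative usage (not part of the original source): a call such as
 * DBG("blksz %u\n", blksz) expands to
 * pr_debug("sdhci [%s()]: blksz %u\n", __func__, blksz), so every debug
 * line carries the driver name and the calling function.
 */
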
#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_get_cd(struct mmc_host *mmc);

static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		 mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		 sdhci_readl(host, SDHCI_DMA_ADDRESS),
		 sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		 sdhci_readw(host, SDHCI_BLOCK_SIZE),
		 sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		 sdhci_readl(host, SDHCI_ARGUMENT),
		 sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		 sdhci_readl(host, SDHCI_PRESENT_STATE),
		 sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		 sdhci_readb(host, SDHCI_POWER_CONTROL),
		 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		 sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		 sdhci_readl(host, SDHCI_INT_ENABLE),
		 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		 sdhci_readw(host, SDHCI_ACMD12_ERR),
		 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
		 sdhci_readl(host, SDHCI_CAPABILITIES),
		 sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
		 sdhci_readw(host, SDHCI_COMMAND),
		 sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		 sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 *                           Low level functions                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!sdhci_get_cd(host->mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);

static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 *                              Core functions                               *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      data->flags & MMC_DATA_WRITE ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

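/*
 * Editorial note (summary, not in the original source): the cookie
 * records who owns the DMA mapping. sdhci_pre_req() maps ahead of time
 * with COOKIE_PRE_MAPPED and sdhci_post_req() unmaps it again, while
 * mappings made on the fly here with COOKIE_MAPPED are instead torn
 * down by the finish tasklet once the request completes.
 */
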
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

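/*
 * Worked example (illustrative figures, not from the original source):
 * for a 512-byte chunk at DMA address 0x12340000,
 * sdhci_adma_write_desc(host, desc, 0x12340000, 512, ADMA2_TRAN_VALID)
 * fills cmd with the "tran, valid" attributes, len = 0x0200 and
 * addr_lo = 0x12340000; addr_hi is only written when the host runs
 * with SDHCI_USE_64_BIT_DMA.
 */
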
static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * in this function.
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000 * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

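/*
 * Worked example (illustrative figures, not from the original source):
 * with host->timeout_clk = 1000 (kHz), the minimum timeout is
 * (1 << 13) * 1000 / 1000 = 8192 us. A 100 ms target then needs four
 * doublings (8192 -> 16384 -> 32768 -> 65536 -> 131072 us), so
 * count = 4 is returned, i.e. a hardware timeout of 2^(13+4) clocks.
 */
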
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

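/*
 * Illustrative example (not from the original source): a multi-block
 * read issued under CMD23 with SDHCI_AUTO_CMD23 ends up with
 * mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI |
 * SDHCI_TRNS_AUTO_CMD23 | SDHCI_TRNS_READ (plus SDHCI_TRNS_DMA when
 * the request uses DMA), with the block count from sbc->arg already
 * written to SDHCI_ARGUMENT2 above.
 */
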
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

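/*
 * Illustrative example (not from the original source): for an R1
 * response (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE), the code
 * above selects SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_INDEX,
 * which is then combined with the opcode via SDHCI_MAKE_CMD().
 */
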
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
							SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}

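/*
 * Worked example (illustrative, not from the original source): for a
 * 136-bit R2 response the controller strips the CRC byte, so resp[0]
 * is rebuilt as the long word at SDHCI_RESPONSE + 12 shifted left by
 * 8, OR-ed with the byte at SDHCI_RESPONSE + 11; resp[3] keeps only
 * the shifted word since there is no lower neighbour to borrow a byte
 * from.
 */
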
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

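/*
 * Worked example (illustrative figures, not from the original source):
 * on a v3.00 host with host->max_clk = 200 MHz, no clk_mul and a
 * 50 MHz request, the loop stops at div = 4 (200 MHz / 4 <= 50 MHz),
 * so real_div = 4, the register field carries div >> 1 = 2, and
 * *actual_clock comes out as 50 MHz.
 */
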
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	unsigned long timeout;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	spin_unlock_irq(&host->lock);
	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	spin_lock_irq(&host->lock);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			      unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	if (host->ops->set_power)
		host->ops->set_power(host, mode, vdd);
	else if (!IS_ERR(mmc->supply.vmmc))
		sdhci_set_power_reg(host, mode, vdd);
	else
		sdhci_set_power(host, mode, vdd);
}

/*****************************************************************************\
 *                                                                           *
 *                              MMC callbacks                                *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	__sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
		return 1;

	/*
	 * Try slot gpio detect. If defined it takes precedence
	 * over the built-in controller functionality.
	 */
	if (!IS_ERR_VALUE(gpio_cd))
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, the tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.mrq = &mrq;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		host->mrq = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	if (tuning_count) {
		/*
		 * In case tuning fails, host controllers which support
		 * re-tuning can try tuning again at a later time, when the
		 * re-tuning timer expires. So for these controllers, we
		 * return 0. Since there might be other controllers who do not
		 * have this capability, we return error for them.
		 */
		err = 0;
	}

	host->mmc->retune_period = err ? 0 : tuning_count;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	return err;
}

static int sdhci_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);

	if (!host->ops->select_drive_strength)
		return 0;

	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
						card_drv, drv_type);
}

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}

static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     data->flags & MMC_DATA_WRITE ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);

	data->host_cookie = COOKIE_UNMAPPED;
}

static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}

static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = sdhci_get_cd(host->mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check host->mrq first in case we are runtime suspended */
	if (host->mrq && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};

/*****************************************************************************\
 *                                                                           *
 *                                Tasklets                                   *
 *                                                                           *
\*****************************************************************************/

static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
	     (mrq->sbc && mrq->sbc->error) ||
	     (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
			    (mrq->data->stop && mrq->data->stop->error))) ||
	     (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	sdhci_led_deactivate(host);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 *                          Interrupt handling                               *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error. We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
				&& !host->busy_handle) {
			/* Mark that command complete before busy is ended */
			host->busy_handle = 1;
			return;
		}

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif

2357 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2360 BUG_ON(intmask == 0);
2362 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2363 if (intmask & SDHCI_INT_DATA_AVAIL) {
2364 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2365 if (command == MMC_SEND_TUNING_BLOCK ||
2366 command == MMC_SEND_TUNING_BLOCK_HS200) {
2367 host->tuning_done = 1;
2368 wake_up(&host->buf_ready_int);
2375 * The "data complete" interrupt is also used to
2376 * indicate that a busy state has ended. See comment
2377 * above in sdhci_cmd_irq().
2379 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
2380 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2381 host->cmd->error = -ETIMEDOUT;
2382 tasklet_schedule(&host->finish_tasklet);
2385 if (intmask & SDHCI_INT_DATA_END) {
2387 * Some cards handle busy-end interrupt
2388 * before the command completed, so make
2389 * sure we do things in the proper order.
2391 if (host->busy_handle)
2392 sdhci_finish_command(host);
2394 host->busy_handle = 1;
2399 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2400 mmc_hostname(host->mmc), (unsigned)intmask);
2401 sdhci_dumpregs(host);
2403 return;
2404 }
2406 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2407 host->data->error = -ETIMEDOUT;
2408 else if (intmask & SDHCI_INT_DATA_END_BIT)
2409 host->data->error = -EILSEQ;
2410 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2411 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2412 != MMC_BUS_TEST_R)
2413 host->data->error = -EILSEQ;
2414 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2415 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2416 sdhci_adma_show_error(host);
2417 host->data->error = -EIO;
2418 if (host->ops->adma_workaround)
2419 host->ops->adma_workaround(host, intmask);
2420 }
2422 if (host->data->error)
2423 sdhci_finish_data(host);
2424 else {
2425 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2426 sdhci_transfer_pio(host);
2428 /*
2429 * We currently don't do anything fancy with DMA
2430 * boundaries, but as we can't disable the feature
2431 * we need to at least restart the transfer.
2432 *
2433 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2434 * should return a valid address to continue from, but as
2435 * some controllers are faulty, don't trust them.
2436 */
2437 if (intmask & SDHCI_INT_DMA_END) {
2438 u32 dmastart, dmanow;
2439 dmastart = sg_dma_address(host->data->sg);
2440 dmanow = dmastart + host->data->bytes_xfered;
2441 /*
2442 * Force update to the next DMA block boundary.
2443 */
2444 dmanow = (dmanow &
2445 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2446 SDHCI_DEFAULT_BOUNDARY_SIZE;
2447 host->data->bytes_xfered = dmanow - dmastart;
2448 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2450 mmc_hostname(host->mmc), dmastart,
2451 host->data->bytes_xfered, dmanow);
2452 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2453 }
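/*
 * Worked example of the boundary arithmetic above, with illustrative
 * values: SDHCI_DEFAULT_BOUNDARY_SIZE is 512 KiB (0x80000), so with
 * dmastart = 0x10000000 and bytes_xfered = 0x12345, dmanow is first
 * 0x10012345 and is then rounded up to the next boundary:
 *
 *	dmanow = (0x10012345 & ~0x7ffff) + 0x80000 = 0x10080000
 *
 * i.e. the transfer is restarted at the 512 KiB boundary at which the
 * controller stopped.
 */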
2455 if (intmask & SDHCI_INT_DATA_END) {
2456 if (host->cmd) {
2457 /*
2458 * Data managed to finish before the
2459 * command completed. Make sure we do
2460 * things in the proper order.
2461 */
2462 host->data_early = 1;
2463 } else {
2464 sdhci_finish_data(host);
2465 }
2466 }
2467 }
2468 }
2470 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2471 {
2472 irqreturn_t result = IRQ_NONE;
2473 struct sdhci_host *host = dev_id;
2474 u32 intmask, mask, unexpected = 0;
2475 int max_loops = 16;
2477 spin_lock(&host->lock);
2479 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2480 spin_unlock(&host->lock);
2481 return IRQ_NONE;
2482 }
2484 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2485 if (!intmask || intmask == 0xffffffff) {
2486 result = IRQ_NONE;
2487 goto out;
2488 }
2490 do {
2491 /* Clear selected interrupts. */
2492 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2493 SDHCI_INT_BUS_POWER);
2494 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2496 DBG("*** %s got interrupt: 0x%08x\n",
2497 mmc_hostname(host->mmc), intmask);
2499 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2500 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2501 SDHCI_CARD_PRESENT;
2503 /*
2504 * There is an observation on i.mx esdhc. INSERT
2505 * bit will be immediately set again when it gets
2506 * cleared, if a card is inserted. We have to mask
2507 * the irq to prevent interrupt storm which will
2508 * freeze the system. And the REMOVE gets the
2509 * same situation.
2510 *
2511 * More testing is needed here to ensure it works
2512 * for other platforms though.
2513 */
2514 host->ier &= ~(SDHCI_INT_CARD_INSERT |
2515 SDHCI_INT_CARD_REMOVE);
2516 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2517 SDHCI_INT_CARD_INSERT;
2518 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2519 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2521 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2522 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2524 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2525 SDHCI_INT_CARD_REMOVE);
2526 result = IRQ_WAKE_THREAD;
2527 }
2529 if (intmask & SDHCI_INT_CMD_MASK)
2530 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
2531 &intmask);
2533 if (intmask & SDHCI_INT_DATA_MASK)
2534 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2536 if (intmask & SDHCI_INT_BUS_POWER)
2537 pr_err("%s: Card is consuming too much power!\n",
2538 mmc_hostname(host->mmc));
2540 if (intmask & SDHCI_INT_CARD_INT) {
2541 sdhci_enable_sdio_irq_nolock(host, false);
2542 host->thread_isr |= SDHCI_INT_CARD_INT;
2543 result = IRQ_WAKE_THREAD;
2544 }
2546 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2547 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2548 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2549 SDHCI_INT_CARD_INT);
2551 if (intmask) {
2552 unexpected |= intmask;
2553 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2554 }
2556 if (result == IRQ_NONE)
2557 result = IRQ_HANDLED;
2559 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2560 } while (intmask && --max_loops);
2561 out:
2562 spin_unlock(&host->lock);
2564 if (unexpected) {
2565 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2566 mmc_hostname(host->mmc), unexpected);
2567 sdhci_dumpregs(host);
2568 }
2570 return result;
2571 }
2573 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2574 {
2575 struct sdhci_host *host = dev_id;
2576 unsigned long flags;
2577 u32 isr;
2579 spin_lock_irqsave(&host->lock, flags);
2580 isr = host->thread_isr;
2581 host->thread_isr = 0;
2582 spin_unlock_irqrestore(&host->lock, flags);
2584 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2585 sdhci_card_event(host->mmc);
2586 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
2587 }
2589 if (isr & SDHCI_INT_CARD_INT) {
2590 sdio_run_irqs(host->mmc);
2592 spin_lock_irqsave(&host->lock, flags);
2593 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2594 sdhci_enable_sdio_irq_nolock(host, true);
2595 spin_unlock_irqrestore(&host->lock, flags);
2596 }
2598 return isr ? IRQ_HANDLED : IRQ_NONE;
2599 }
2601 /*****************************************************************************\
2603 * Suspend/resume *
2605 \*****************************************************************************/
2607 #ifdef CONFIG_PM
2608 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2609 {
2610 u8 val;
2611 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2612 | SDHCI_WAKE_ON_INT;
2614 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2615 val |= mask;
2616 /* Avoid fake wake up */
2617 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2618 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2619 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2620 }
2621 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2623 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2624 {
2625 u8 val;
2626 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2627 | SDHCI_WAKE_ON_INT;
2629 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2630 val &= ~mask;
2631 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2632 }
2634 int sdhci_suspend_host(struct sdhci_host *host)
2635 {
2636 sdhci_disable_card_detection(host);
2638 mmc_retune_timer_stop(host->mmc);
2639 mmc_retune_needed(host->mmc);
2641 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2642 host->ier = 0;
2643 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2644 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2645 free_irq(host->irq, host);
2646 } else {
2647 sdhci_enable_irq_wakeups(host);
2648 enable_irq_wake(host->irq);
2649 }
2651 return 0;
2652 }
2653 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2655 int sdhci_resume_host(struct sdhci_host *host)
2656 {
2657 int ret = 0;
2659 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2660 if (host->ops->enable_dma)
2661 host->ops->enable_dma(host);
2662 }
2664 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2665 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2666 /* Card keeps power but host controller does not */
2667 sdhci_init(host, 0);
2668 host->pwr = 0;
2669 host->clock = 0;
2670 sdhci_set_ios(host->mmc, &host->mmc->ios);
2671 } else {
2672 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2673 mmiowb();
2674 }
2676 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2677 ret = request_threaded_irq(host->irq, sdhci_irq,
2678 sdhci_thread_irq, IRQF_SHARED,
2679 mmc_hostname(host->mmc), host);
2680 if (ret)
2681 return ret;
2682 } else {
2683 sdhci_disable_irq_wakeups(host);
2684 disable_irq_wake(host->irq);
2685 }
2687 sdhci_enable_card_detection(host);
2689 return ret;
2690 }
2692 EXPORT_SYMBOL_GPL(sdhci_resume_host);
2694 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2695 {
2696 unsigned long flags;
2698 mmc_retune_timer_stop(host->mmc);
2699 mmc_retune_needed(host->mmc);
2701 spin_lock_irqsave(&host->lock, flags);
2702 host->ier &= SDHCI_INT_CARD_INT;
2703 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2704 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2705 spin_unlock_irqrestore(&host->lock, flags);
2707 synchronize_hardirq(host->irq);
2709 spin_lock_irqsave(&host->lock, flags);
2710 host->runtime_suspended = true;
2711 spin_unlock_irqrestore(&host->lock, flags);
2713 return 0;
2714 }
2715 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2717 int sdhci_runtime_resume_host(struct sdhci_host *host)
2718 {
2719 unsigned long flags;
2720 int host_flags = host->flags;
2722 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2723 if (host->ops->enable_dma)
2724 host->ops->enable_dma(host);
2725 }
2727 sdhci_init(host, 0);
2729 /* Force clock and power re-program */
2730 host->pwr = 0;
2731 host->clock = 0;
2732 sdhci_start_signal_voltage_switch(host->mmc, &host->mmc->ios);
2733 sdhci_set_ios(host->mmc, &host->mmc->ios);
2735 if ((host_flags & SDHCI_PV_ENABLED) &&
2736 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2737 spin_lock_irqsave(&host->lock, flags);
2738 sdhci_enable_preset_value(host, true);
2739 spin_unlock_irqrestore(&host->lock, flags);
2740 }
2742 spin_lock_irqsave(&host->lock, flags);
2744 host->runtime_suspended = false;
2746 /* Enable SDIO IRQ */
2747 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2748 sdhci_enable_sdio_irq_nolock(host, true);
2750 /* Enable Card Detection */
2751 sdhci_enable_card_detection(host);
2753 spin_unlock_irqrestore(&host->lock, flags);
2755 return 0;
2756 }
2757 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
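/*
 * Illustrative only: a glue driver would typically wire the two helpers
 * above into its dev_pm_ops runtime callbacks, roughly as sketched below
 * (the my_sdhci_* names are hypothetical, not part of this file, and the
 * sketch assumes the host was stored with dev_set_drvdata() at probe):
 *
 *	static int my_sdhci_runtime_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_runtime_suspend_host(host);
 *	}
 *
 *	static int my_sdhci_runtime_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_runtime_resume_host(host);
 *	}
 *
 *	static const struct dev_pm_ops my_sdhci_pm_ops = {
 *		SET_RUNTIME_PM_OPS(my_sdhci_runtime_suspend,
 *				   my_sdhci_runtime_resume, NULL)
 *	};
 */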
2759 #endif /* CONFIG_PM */
2761 /*****************************************************************************\
2763 * Device allocation/registration *
2765 \*****************************************************************************/
2767 struct sdhci_host *sdhci_alloc_host(struct device *dev,
2768 size_t priv_size)
2769 {
2770 struct mmc_host *mmc;
2771 struct sdhci_host *host;
2773 WARN_ON(dev == NULL);
2775 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
2776 if (!mmc)
2777 return ERR_PTR(-ENOMEM);
2779 host = mmc_priv(mmc);
2780 host->mmc = mmc;
2781 host->mmc_host_ops = sdhci_ops;
2782 mmc->ops = &host->mmc_host_ops;
2784 return host;
2785 }
2787 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
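/*
 * Illustrative only: the usual probe-time sequence for a platform glue
 * driver built on this core looks roughly like the sketch below (names
 * such as my_priv and my_sdhci_ops are hypothetical, and error handling
 * is abbreviated):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "my-sdhci";
 *	host->ops = &my_sdhci_ops;
 *	host->irq = platform_get_irq(pdev, 0);
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 *	return ret;
 */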
2789 static int sdhci_set_dma_mask(struct sdhci_host *host)
2790 {
2791 struct mmc_host *mmc = host->mmc;
2792 struct device *dev = mmc_dev(mmc);
2793 int ret = -EINVAL;
2795 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
2796 host->flags &= ~SDHCI_USE_64_BIT_DMA;
2798 /* Try 64-bit mask if hardware is capable of it */
2799 if (host->flags & SDHCI_USE_64_BIT_DMA) {
2800 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
2801 if (ret) {
2802 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
2803 mmc_hostname(mmc));
2804 host->flags &= ~SDHCI_USE_64_BIT_DMA;
2805 }
2806 }
2808 /* 32-bit mask as default & fallback */
2809 if (ret) {
2810 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2811 if (ret)
2812 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
2813 mmc_hostname(mmc));
2814 }
2816 return ret;
2817 }
2819 int sdhci_add_host(struct sdhci_host *host)
2820 {
2821 struct mmc_host *mmc;
2822 u32 caps[2] = {0, 0};
2823 u32 max_current_caps;
2824 unsigned int ocr_avail;
2825 unsigned int override_timeout_clk;
2826 u32 max_clk;
2827 int ret;
2829 WARN_ON(host == NULL);
2830 if (host == NULL)
2831 return -EINVAL;
2833 mmc = host->mmc;
2835 if (debug_quirks)
2836 host->quirks = debug_quirks;
2837 if (debug_quirks2)
2838 host->quirks2 = debug_quirks2;
2840 override_timeout_clk = host->timeout_clk;
2842 sdhci_do_reset(host, SDHCI_RESET_ALL);
2844 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
2845 host->version = (host->version & SDHCI_SPEC_VER_MASK)
2846 >> SDHCI_SPEC_VER_SHIFT;
2847 if (host->version > SDHCI_SPEC_300) {
2848 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
2849 mmc_hostname(mmc), host->version);
2850 }
2852 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
2853 sdhci_readl(host, SDHCI_CAPABILITIES);
2855 if (host->version >= SDHCI_SPEC_300)
2856 caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
2857 host->caps1 :
2858 sdhci_readl(host, SDHCI_CAPABILITIES_1);
2860 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
2861 host->flags |= SDHCI_USE_SDMA;
2862 else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
2863 DBG("Controller doesn't have SDMA capability\n");
2864 else
2865 host->flags |= SDHCI_USE_SDMA;
2867 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
2868 (host->flags & SDHCI_USE_SDMA)) {
2869 DBG("Disabling DMA as it is marked broken\n");
2870 host->flags &= ~SDHCI_USE_SDMA;
2871 }
2873 if ((host->version >= SDHCI_SPEC_200) &&
2874 (caps[0] & SDHCI_CAN_DO_ADMA2))
2875 host->flags |= SDHCI_USE_ADMA;
2877 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
2878 (host->flags & SDHCI_USE_ADMA)) {
2879 DBG("Disabling ADMA as it is marked broken\n");
2880 host->flags &= ~SDHCI_USE_ADMA;
2881 }
2883 /*
2884 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
2885 * and *must* do 64-bit DMA. A driver has the opportunity to change
2886 * that during the first call to ->enable_dma(). Similarly
2887 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
2888 * implement.
2889 */
2890 if (caps[0] & SDHCI_CAN_64BIT)
2891 host->flags |= SDHCI_USE_64_BIT_DMA;
2893 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2894 ret = sdhci_set_dma_mask(host);
2896 if (!ret && host->ops->enable_dma)
2897 ret = host->ops->enable_dma(host);
2899 if (ret) {
2900 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
2901 mmc_hostname(mmc));
2902 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
2904 ret = 0;
2905 }
2906 }
2908 /* SDMA does not support 64-bit DMA */
2909 if (host->flags & SDHCI_USE_64_BIT_DMA)
2910 host->flags &= ~SDHCI_USE_SDMA;
2912 if (host->flags & SDHCI_USE_ADMA) {
2913 dma_addr_t dma;
2914 void *buf;
2916 /*
2917 * The DMA descriptor table size is calculated as the maximum
2918 * number of segments times 2, to allow for an alignment
2919 * descriptor for each segment, plus 1 for a nop end descriptor,
2920 * all multiplied by the descriptor size.
2921 */
2922 if (host->flags & SDHCI_USE_64_BIT_DMA) {
2923 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2924 SDHCI_ADMA2_64_DESC_SZ;
2925 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
2926 } else {
2927 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2928 SDHCI_ADMA2_32_DESC_SZ;
2929 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
2930 }
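/*
 * For a sense of scale, assuming the values in sdhci.h (SDHCI_MAX_SEGS
 * = 128, 12-byte 64-bit descriptors, 8-byte 32-bit descriptors): the
 * 64-bit case allocates (128 * 2 + 1) * 12 = 3084 bytes of descriptor
 * table, and the 32-bit case (128 * 2 + 1) * 8 = 2056 bytes.
 */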
2932 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
2933 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
2934 host->adma_table_sz, &dma, GFP_KERNEL);
2935 if (!buf) {
2936 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2937 mmc_hostname(mmc));
2938 host->flags &= ~SDHCI_USE_ADMA;
2939 } else if ((dma + host->align_buffer_sz) &
2940 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
2941 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
2942 mmc_hostname(mmc));
2943 host->flags &= ~SDHCI_USE_ADMA;
2944 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
2945 host->adma_table_sz, buf, dma);
2946 } else {
2947 host->align_buffer = buf;
2948 host->align_addr = dma;
2950 host->adma_table = buf + host->align_buffer_sz;
2951 host->adma_addr = dma + host->align_buffer_sz;
2952 }
2953 }
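/*
 * The single coherent allocation above is laid out as follows (sketch):
 *
 *	buf/dma                         buf/dma + align_buffer_sz
 *	|                               |
 *	v                               v
 *	+-------------------------------+----------------------------+
 *	| bounce buffers for unaligned  | ADMA2 descriptor table     |
 *	| sg entries (align_buffer_sz)  | (adma_table_sz)            |
 *	+-------------------------------+----------------------------+
 *
 * which is why the descriptor table pointer and DMA address are derived
 * by offsetting buf and dma by align_buffer_sz.
 */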
2955 /*
2956 * If we use DMA, then it's up to the caller to set the DMA
2957 * mask, but PIO does not need the hw shim so we set a new
2958 * mask here in that case.
2959 */
2960 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
2961 host->dma_mask = DMA_BIT_MASK(64);
2962 mmc_dev(mmc)->dma_mask = &host->dma_mask;
2963 }
2965 if (host->version >= SDHCI_SPEC_300)
2966 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
2967 >> SDHCI_CLOCK_BASE_SHIFT;
2968 else
2969 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
2970 >> SDHCI_CLOCK_BASE_SHIFT;
2972 host->max_clk *= 1000000;
2973 if (host->max_clk == 0 || host->quirks &
2974 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
2975 if (!host->ops->get_max_clock) {
2976 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
2977 mmc_hostname(mmc));
2978 ret = -ENODEV;
2979 goto undma;
2980 }
2981 host->max_clk = host->ops->get_max_clock(host);
2982 }
2984 /*
2985 * In case of Host Controller v3.00, find out whether clock
2986 * multiplier is supported.
2987 */
2988 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
2989 SDHCI_CLOCK_MUL_SHIFT;
2991 /*
2992 * In case the value in Clock Multiplier is 0, then programmable
2993 * clock mode is not supported, otherwise the actual clock
2994 * multiplier is one more than the value of Clock Multiplier
2995 * in the Capabilities Register.
2996 */
2997 if (host->clk_mul)
2998 host->clk_mul += 1;
3000 /*
3001 * Set host parameters.
3002 */
3003 max_clk = host->max_clk;
3005 if (host->ops->get_min_clock)
3006 mmc->f_min = host->ops->get_min_clock(host);
3007 else if (host->version >= SDHCI_SPEC_300) {
3008 if (host->clk_mul) {
3009 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3010 max_clk = host->max_clk * host->clk_mul;
3011 } else
3012 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3013 } else
3014 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3016 if (!mmc->f_max || mmc->f_max > max_clk)
3017 mmc->f_max = max_clk;
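/*
 * Worked example (illustrative numbers): a v3.00 controller with a
 * 200 MHz base clock and no clock multiplier gets
 * f_min = 200 MHz / SDHCI_MAX_DIV_SPEC_300 (2046) ~= 97.8 kHz and
 * f_max = 200 MHz. If the capabilities instead report a Clock
 * Multiplier of 4 (so clk_mul = 5 after the +1 adjustment above),
 * f_min = (200 MHz * 5) / 1024 ~= 977 kHz and f_max becomes 1 GHz,
 * the maximum the programmable clock generator can produce.
 */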
3019 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3020 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
3021 SDHCI_TIMEOUT_CLK_SHIFT;
3022 if (host->timeout_clk == 0) {
3023 if (host->ops->get_timeout_clock) {
3024 host->timeout_clk =
3025 host->ops->get_timeout_clock(host);
3026 } else {
3027 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3028 mmc_hostname(mmc));
3029 ret = -ENODEV;
3030 goto undma;
3031 }
3032 }
3034 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
3035 host->timeout_clk *= 1000;
3037 if (override_timeout_clk)
3038 host->timeout_clk = override_timeout_clk;
3040 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3041 host->ops->get_max_timeout_count(host) : 1 << 27;
3042 mmc->max_busy_timeout /= host->timeout_clk;
3043 }
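/*
 * Worked example (illustrative numbers): with a 48 MHz timeout clock
 * and SDHCI_TIMEOUT_CLK_UNIT set, timeout_clk = 48 * 1000 = 48000 kHz;
 * without a ->get_max_timeout_count() hook the count defaults to
 * 1 << 27, so max_busy_timeout = (1 << 27) / 48000 ~= 2796 ms.
 */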
3045 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3046 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3048 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3049 host->flags |= SDHCI_AUTO_CMD12;
3051 /* Auto-CMD23 stuff only works in ADMA or PIO. */
3052 if ((host->version >= SDHCI_SPEC_300) &&
3053 ((host->flags & SDHCI_USE_ADMA) ||
3054 !(host->flags & SDHCI_USE_SDMA)) &&
3055 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3056 host->flags |= SDHCI_AUTO_CMD23;
3057 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
3059 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
3063 * A controller may support 8-bit width, but the board itself
3064 * might not have the pins brought out. Boards that support
3065 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3066 * their platform code before calling sdhci_add_host(), and we
3067 * won't assume 8-bit width for hosts without that CAP.
3068 */
3069 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3070 mmc->caps |= MMC_CAP_4_BIT_DATA;
3072 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3073 mmc->caps &= ~MMC_CAP_CMD23;
3075 if (caps[0] & SDHCI_CAN_DO_HISPD)
3076 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3078 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3079 !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
3080 IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
3081 mmc->caps |= MMC_CAP_NEEDS_POLL;
3083 /* If there are external regulators, get them */
3084 ret = mmc_regulator_get_supply(mmc);
3085 if (ret == -EPROBE_DEFER)
3086 goto undma;
3088 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3089 if (!IS_ERR(mmc->supply.vqmmc)) {
3090 ret = regulator_enable(mmc->supply.vqmmc);
3091 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3092 1950000))
3093 caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
3094 SDHCI_SUPPORT_SDR50 |
3095 SDHCI_SUPPORT_DDR50);
3096 if (ret) {
3097 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3098 mmc_hostname(mmc), ret);
3099 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3100 }
3101 }
3103 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
3104 caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3105 SDHCI_SUPPORT_DDR50);
3107 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3108 if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3109 SDHCI_SUPPORT_DDR50))
3110 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3112 /* SDR104 support also implies SDR50 support */
3113 if (caps[1] & SDHCI_SUPPORT_SDR104) {
3114 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3115 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3116 * field can be promoted to support HS200.
3117 */
3118 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3119 mmc->caps2 |= MMC_CAP2_HS200;
3120 } else if (caps[1] & SDHCI_SUPPORT_SDR50)
3121 mmc->caps |= MMC_CAP_UHS_SDR50;
3123 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3124 (caps[1] & SDHCI_SUPPORT_HS400))
3125 mmc->caps2 |= MMC_CAP2_HS400;
3127 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3128 (IS_ERR(mmc->supply.vqmmc) ||
3129 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3130 1300000)))
3131 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3133 if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
3134 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3135 mmc->caps |= MMC_CAP_UHS_DDR50;
3137 /* Does the host need tuning for SDR50? */
3138 if (caps[1] & SDHCI_USE_SDR50_TUNING)
3139 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3141 /* Driver Type(s) (A, C, D) supported by the host */
3142 if (caps[1] & SDHCI_DRIVER_TYPE_A)
3143 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3144 if (caps[1] & SDHCI_DRIVER_TYPE_C)
3145 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3146 if (caps[1] & SDHCI_DRIVER_TYPE_D)
3147 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3149 /* Initial value for re-tuning timer count */
3150 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3151 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3153 /*
3154 * In case Re-tuning Timer is not disabled, the actual value of
3155 * re-tuning timer will be 2 ^ (n - 1).
3156 */
3157 if (host->tuning_count)
3158 host->tuning_count = 1 << (host->tuning_count - 1);
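/*
 * Example: a Re-Tuning Timer Count field of 4 means re-tuning every
 * 2 ^ (4 - 1) = 8 seconds, while a field of 0 (tuning_count stays 0)
 * disables the timer entirely.
 */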
3160 /* Re-tuning mode supported by the Host Controller */
3161 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
3162 SDHCI_RETUNING_MODE_SHIFT;
3164 ocr_avail = 0;
3166 /*
3167 * According to SD Host Controller spec v3.00, if the Host System
3168 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3169 * the value is meaningful only if Voltage Support in the Capabilities
3170 * register is set. The actual current value is 4 times the register
3171 * value.
3172 */
3173 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3174 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3175 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3176 if (curr > 0) {
3178 /* convert to SDHCI_MAX_CURRENT format */
3179 curr = curr/1000; /* convert to mA */
3180 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3182 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3183 max_current_caps =
3184 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3185 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3186 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3187 }
3188 }
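/*
 * Worked example: a vmmc regulator limited to 800000 uA yields
 * curr = 800000 / 1000 / SDHCI_MAX_CURRENT_MULTIPLIER (4) = 200, i.e.
 * 200 * 4 = 800 mA encoded into each of the 3.3 V / 3.0 V / 1.8 V
 * fields of the synthesized max_current_caps value.
 */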
3190 if (caps[0] & SDHCI_CAN_VDD_330) {
3191 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3193 mmc->max_current_330 = ((max_current_caps &
3194 SDHCI_MAX_CURRENT_330_MASK) >>
3195 SDHCI_MAX_CURRENT_330_SHIFT) *
3196 SDHCI_MAX_CURRENT_MULTIPLIER;
3197 }
3198 if (caps[0] & SDHCI_CAN_VDD_300) {
3199 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3201 mmc->max_current_300 = ((max_current_caps &
3202 SDHCI_MAX_CURRENT_300_MASK) >>
3203 SDHCI_MAX_CURRENT_300_SHIFT) *
3204 SDHCI_MAX_CURRENT_MULTIPLIER;
3205 }
3206 if (caps[0] & SDHCI_CAN_VDD_180) {
3207 ocr_avail |= MMC_VDD_165_195;
3209 mmc->max_current_180 = ((max_current_caps &
3210 SDHCI_MAX_CURRENT_180_MASK) >>
3211 SDHCI_MAX_CURRENT_180_SHIFT) *
3212 SDHCI_MAX_CURRENT_MULTIPLIER;
3213 }
3215 /* If OCR set by host, use it instead. */
3216 if (host->ocr_mask)
3217 ocr_avail = host->ocr_mask;
3219 /* If OCR set by external regulators, give it highest prio. */
3220 if (mmc->ocr_avail)
3221 ocr_avail = mmc->ocr_avail;
3223 mmc->ocr_avail = ocr_avail;
3224 mmc->ocr_avail_sdio = ocr_avail;
3225 if (host->ocr_avail_sdio)
3226 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3227 mmc->ocr_avail_sd = ocr_avail;
3228 if (host->ocr_avail_sd)
3229 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3230 else /* normal SD controllers don't support 1.8V */
3231 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3232 mmc->ocr_avail_mmc = ocr_avail;
3233 if (host->ocr_avail_mmc)
3234 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3236 if (mmc->ocr_avail == 0) {
3237 pr_err("%s: Hardware doesn't report any supported voltages.\n",
3238 mmc_hostname(mmc));
3239 ret = -ENODEV;
3240 goto unreg;
3241 }
3243 spin_lock_init(&host->lock);
3245 /*
3246 * Maximum number of segments. Depends on if the hardware
3247 * can do scatter/gather or not.
3248 */
3249 if (host->flags & SDHCI_USE_ADMA)
3250 mmc->max_segs = SDHCI_MAX_SEGS;
3251 else if (host->flags & SDHCI_USE_SDMA)
3252 mmc->max_segs = 1;
3253 else /* PIO */
3254 mmc->max_segs = SDHCI_MAX_SEGS;
3256 /*
3257 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3258 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
3259 * is less anyway.
3260 */
3261 mmc->max_req_size = 524288;
3263 /*
3264 * Maximum segment size. Could be one segment with the maximum number
3265 * of bytes. When doing hardware scatter/gather, each entry cannot
3266 * be larger than 64 KiB though.
3267 */
3268 if (host->flags & SDHCI_USE_ADMA) {
3269 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3270 mmc->max_seg_size = 65535;
3271 else
3272 mmc->max_seg_size = 65536;
3273 } else {
3274 mmc->max_seg_size = mmc->max_req_size;
3275 }
3277 /*
3278 * Maximum block size. This varies from controller to controller and
3279 * is specified in the capabilities register.
3280 */
3281 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3282 mmc->max_blk_size = 2;
3283 } else {
3284 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
3285 SDHCI_MAX_BLOCK_SHIFT;
3286 if (mmc->max_blk_size >= 3) {
3287 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3288 mmc_hostname(mmc));
3289 mmc->max_blk_size = 0;
3290 }
3291 }
3293 mmc->max_blk_size = 512 << mmc->max_blk_size;
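/*
 * The capabilities field encodes the maximum block length as a power of
 * two over 512 bytes: 0 -> 512, 1 -> 1024 and 2 -> 2048, hence the
 * "512 << max_blk_size" above (and the forced value of 2 for the
 * SDHCI_QUIRK_FORCE_BLK_SZ_2048 quirk).
 */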
3295 /*
3296 * Maximum block count.
3297 */
3298 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3300 /*
3301 * Init tasklets.
3302 */
3303 tasklet_init(&host->finish_tasklet,
3304 sdhci_tasklet_finish, (unsigned long)host);
3306 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3308 init_waitqueue_head(&host->buf_ready_int);
3310 sdhci_init(host, 0);
3312 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3313 IRQF_SHARED, mmc_hostname(mmc), host);
3314 if (ret) {
3315 pr_err("%s: Failed to request IRQ %d: %d\n",
3316 mmc_hostname(mmc), host->irq, ret);
3317 goto untasklet;
3318 }
3320 #ifdef CONFIG_MMC_DEBUG
3321 sdhci_dumpregs(host);
3322 #endif
3324 ret = sdhci_led_register(host);
3325 if (ret) {
3326 pr_err("%s: Failed to register LED device: %d\n",
3327 mmc_hostname(mmc), ret);
3328 goto unirq;
3329 }
3331 mmiowb();
3333 ret = mmc_add_host(mmc);
3334 if (ret)
3335 goto unled;
3337 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3338 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3339 (host->flags & SDHCI_USE_ADMA) ?
3340 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3341 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3343 sdhci_enable_card_detection(host);
3345 return 0;
3347 unled:
3348 sdhci_led_unregister(host);
3349 unirq:
3350 sdhci_do_reset(host, SDHCI_RESET_ALL);
3351 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3352 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3353 free_irq(host->irq, host);
3354 untasklet:
3355 tasklet_kill(&host->finish_tasklet);
3356 unreg:
3357 if (!IS_ERR(mmc->supply.vqmmc))
3358 regulator_disable(mmc->supply.vqmmc);
3359 undma:
3360 if (host->align_buffer)
3361 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3362 host->adma_table_sz, host->align_buffer,
3363 host->align_addr);
3364 host->adma_table = NULL;
3365 host->align_buffer = NULL;
3367 return ret;
3368 }
3370 EXPORT_SYMBOL_GPL(sdhci_add_host);
3372 void sdhci_remove_host(struct sdhci_host *host, int dead)
3373 {
3374 struct mmc_host *mmc = host->mmc;
3375 unsigned long flags;
3377 if (dead) {
3378 spin_lock_irqsave(&host->lock, flags);
3380 host->flags |= SDHCI_DEVICE_DEAD;
3382 if (host->mrq) {
3383 pr_err("%s: Controller removed during transfer!\n",
3384 mmc_hostname(mmc));
3386 host->mrq->cmd->error = -ENOMEDIUM;
3387 tasklet_schedule(&host->finish_tasklet);
3388 }
3390 spin_unlock_irqrestore(&host->lock, flags);
3391 }
3393 sdhci_disable_card_detection(host);
3395 mmc_remove_host(mmc);
3397 sdhci_led_unregister(host);
3399 if (!dead)
3400 sdhci_do_reset(host, SDHCI_RESET_ALL);
3402 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3403 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3404 free_irq(host->irq, host);
3406 del_timer_sync(&host->timer);
3408 tasklet_kill(&host->finish_tasklet);
3410 if (!IS_ERR(mmc->supply.vqmmc))
3411 regulator_disable(mmc->supply.vqmmc);
3413 if (host->align_buffer)
3414 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3415 host->adma_table_sz, host->align_buffer,
3416 host->align_addr);
3418 host->adma_table = NULL;
3419 host->align_buffer = NULL;
3421 }
3422 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3424 void sdhci_free_host(struct sdhci_host *host)
3425 {
3426 mmc_free_host(host->mmc);
3427 }
3429 EXPORT_SYMBOL_GPL(sdhci_free_host);
3431 /*****************************************************************************\
3433 * Driver init/exit *
3435 \*****************************************************************************/
3437 static int __init sdhci_drv_init(void)
3438 {
3439 pr_info(DRIVER_NAME
3440 ": Secure Digital Host Controller Interface driver\n");
3441 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3443 return 0;
3444 }
3446 static void __exit sdhci_drv_exit(void)
3447 {
3448 }
3450 module_init(sdhci_drv_init);
3451 module_exit(sdhci_drv_exit);
3453 module_param(debug_quirks, uint, 0444);
3454 module_param(debug_quirks2, uint, 0444);
3456 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3457 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3458 MODULE_LICENSE("GPL");
3460 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3461 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");