/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"
#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
	       mmc_hostname(host->mmc));

	pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
	       sdhci_readl(host, SDHCI_DMA_ADDRESS),
	       sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
	       sdhci_readw(host, SDHCI_BLOCK_SIZE),
	       sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
	       sdhci_readl(host, SDHCI_ARGUMENT),
	       sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_err(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
	       sdhci_readl(host, SDHCI_PRESENT_STATE),
	       sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_err(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
	       sdhci_readb(host, SDHCI_POWER_CONTROL),
	       sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_err(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
	       sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
	       sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_err(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
	       sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
	       sdhci_readl(host, SDHCI_INT_STATUS));
	pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
	       sdhci_readl(host, SDHCI_INT_ENABLE),
	       sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
	       sdhci_readw(host, SDHCI_ACMD12_ERR),
	       sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_err(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
	       sdhci_readl(host, SDHCI_CAPABILITIES),
	       sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_err(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
	       sdhci_readw(host, SDHCI_COMMAND),
	       sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
	       sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_err(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}
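
/*
 * Card insert/remove interrupts are armed according to the current
 * card-present state, so that only the relevant transition fires.
 */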
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
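
/*
 * Reset through the host's ->reset callback, then restore state that a
 * full reset clobbers (DMA selection, preset enable).
 */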
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
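
/*
 * Shovel blocks through the PIO buffer port while the controller reports
 * data/space available, one block per iteration.
 */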
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      data->flags & MMC_DATA_WRITE ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
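
/*
 * Atomic kmap helpers for the ADMA bounce-buffer copies; interrupts are
 * disabled across the mapping, so only short memcpy()s belong here.
 */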
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
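
/*
 * Build the ADMA2 descriptor table for a mapped scatterlist.  Buffers that
 * are not 32-bit aligned have their leading bytes redirected through the
 * pre-allocated align (bounce) buffer.
 */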
static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * in the code.
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}
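
/*
 * Translate the requested timeout into the 4-bit value written to the
 * Timeout Control register, where each step doubles the TMCLK count.
 */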
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}
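
/*
 * Program the Transfer Mode register, including the Auto-CMD12/Auto-CMD23
 * selection worked out above.
 */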
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
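
/*
 * Queue a completed request for the finish tasklet.  mrqs_done[] has one
 * slot per concurrently active request (command line plus data line).
 */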
static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}
static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}
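
/*
 * Kick a command off on the hardware: wait for the inhibit bits to clear,
 * program the argument, transfer mode and command registers, and arm the
 * software timeout.
 */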
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	int i;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}
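
/* Read the preset value register that matches the current bus timing. */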
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
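
/*
 * Enable the internal clock, wait (up to 20 ms) for it to report stable,
 * then gate it through to the card.
 */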
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	unsigned long timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	spin_unlock_irq(&host->lock);
	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	spin_lock_irq(&host->lock);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS ||
	     ios->timing == MMC_TIMING_MMC_HS400 ||
	     ios->timing == MMC_TIMING_MMC_HS200 ||
	     ios->timing == MMC_TIMING_MMC_DDR52 ||
	     ios->timing == MMC_TIMING_UHS_SDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR104 ||
	     ios->timing == MMC_TIMING_UHS_DDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR25)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot GPIO detect; if defined, it takes precedence
	 * over built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
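
/*
 * Controllers with SDHCI_QUIRK_UNSTABLE_RO_DETECT produce flaky
 * write-protect readings, so sample several times and take the majority.
 */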
#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.mrq = &mrq;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		sdhci_del_timer(host, &mrq);

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");

			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);

			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	if (tuning_count) {
		/*
		 * In case tuning fails, host controllers which support
		 * re-tuning can try tuning again at a later time, when the
		 * re-tuning timer expires. So for these controllers, we
		 * return 0. Since there might be other controllers who do not
		 * have this capability, we return error for them.
		 */
		err = 0;
	}

	host->mmc->retune_period = err ? 0 : tuning_count;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	return err;
}
static int sdhci_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);

	if (!host->ops->select_drive_strength)
		return 0;

	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
						card_drv, drv_type);
}
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     data->flags & MMC_DATA_WRITE ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);

	data->host_cookie = COOKIE_UNMAPPED;
}
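
/* Pre-map the data buffers so the mapping cost is off the request path. */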
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}
static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/
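
/*
 * Complete one entry from mrqs_done[]: unmap DMA buffers, perform any
 * pending error reset, and hand the request back to the core.  Returns
 * true when there is nothing (more) to do.
 */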
static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	host->mrqs_done[i] = NULL;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);

	return false;
}
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_timeout_data_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
2427 /*****************************************************************************\
2429 * Interrupt handling *
2431 \*****************************************************************************/
2433 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2434 {
2435 if (!host->cmd) {
2437 * SDHCI recovers from errors by resetting the cmd and data
2438 * circuits. Until that is done, there very well might be more
2439 * interrupts, so ignore them in that case.
2441 if (host->pending_reset)
2442 return;
2443 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2444 mmc_hostname(host->mmc), (unsigned)intmask);
2445 sdhci_dumpregs(host);
2446 return;
2447 }
2449 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2450 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2451 if (intmask & SDHCI_INT_TIMEOUT)
2452 host->cmd->error = -ETIMEDOUT;
2453 else
2454 host->cmd->error = -EILSEQ;
2457 * If this command initiates a data phase and a response
2458 * CRC error is signalled, the card can start transferring
2459 * data - the card may have received the command without
2460 * error. We must not terminate the mmc_request early.
2462 * If the card did not receive the command or returned an
2463 * error which prevented it sending data, the data phase
2464 * will time out.
2466 if (host->cmd->data &&
2467 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2468 SDHCI_INT_CRC) {
2469 host->cmd = NULL;
2470 return;
2471 }
2473 sdhci_finish_mrq(host, host->cmd->mrq);
2474 return;
2475 }
2477 if (intmask & SDHCI_INT_RESPONSE)
2478 sdhci_finish_command(host);
2481 #ifdef CONFIG_MMC_DEBUG
2482 static void sdhci_adma_show_error(struct sdhci_host *host)
2484 const char *name = mmc_hostname(host->mmc);
2485 void *desc = host->adma_table;
2487 sdhci_dumpregs(host);
2489 while (true) {
2490 struct sdhci_adma2_64_desc *dma_desc = desc;
2492 if (host->flags & SDHCI_USE_64_BIT_DMA)
2493 DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2494 name, desc, le32_to_cpu(dma_desc->addr_hi),
2495 le32_to_cpu(dma_desc->addr_lo),
2496 le16_to_cpu(dma_desc->len),
2497 le16_to_cpu(dma_desc->cmd));
2498 else
2499 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2500 name, desc, le32_to_cpu(dma_desc->addr_lo),
2501 le16_to_cpu(dma_desc->len),
2502 le16_to_cpu(dma_desc->cmd));
2504 desc += host->desc_sz;
2506 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2507 break;
2508 }
2509 }
2510 #else
2511 static void sdhci_adma_show_error(struct sdhci_host *host) { }
2512 #endif
2514 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2515 {
2516 u32 command;
2518 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2519 if (intmask & SDHCI_INT_DATA_AVAIL) {
2520 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2521 if (command == MMC_SEND_TUNING_BLOCK ||
2522 command == MMC_SEND_TUNING_BLOCK_HS200) {
2523 host->tuning_done = 1;
2524 wake_up(&host->buf_ready_int);
2525 return;
2526 }
2527 }
2529 if (!host->data) {
2530 struct mmc_command *data_cmd = host->data_cmd;
2533 * The "data complete" interrupt is also used to
2534 * indicate that a busy state has ended. See comment
2535 * above in sdhci_cmd_irq().
2537 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2538 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2539 host->data_cmd = NULL;
2540 data_cmd->error = -ETIMEDOUT;
2541 sdhci_finish_mrq(host, data_cmd->mrq);
2542 return;
2543 }
2544 if (intmask & SDHCI_INT_DATA_END) {
2545 host->data_cmd = NULL;
2547 * Some cards handle busy-end interrupt
2548 * before the command completed, so make
2549 * sure we do things in the proper order.
2551 if (host->cmd == data_cmd)
2552 return;
2554 sdhci_finish_mrq(host, data_cmd->mrq);
2555 return;
2556 }
2557 }
2560 * SDHCI recovers from errors by resetting the cmd and data
2561 * circuits. Until that is done, there very well might be more
2562 * interrupts, so ignore them in that case.
2564 if (host->pending_reset)
2565 return;
2567 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2568 mmc_hostname(host->mmc), (unsigned)intmask);
2569 sdhci_dumpregs(host);
2571 return;
2572 }
2574 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2575 host->data->error = -ETIMEDOUT;
2576 else if (intmask & SDHCI_INT_DATA_END_BIT)
2577 host->data->error = -EILSEQ;
2578 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2579 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2580 != MMC_BUS_TEST_R)
2581 host->data->error = -EILSEQ;
2582 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2583 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2584 sdhci_adma_show_error(host);
2585 host->data->error = -EIO;
2586 if (host->ops->adma_workaround)
2587 host->ops->adma_workaround(host, intmask);
2590 if (host->data->error)
2591 sdhci_finish_data(host);
2592 else {
2593 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2594 sdhci_transfer_pio(host);
2597 * We currently don't do anything fancy with DMA
2598 * boundaries, but as we can't disable the feature
2599 * we need to at least restart the transfer.
2601 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2602 * should return a valid address to continue from, but as
2603 * some controllers are faulty, don't trust them.
2605 if (intmask & SDHCI_INT_DMA_END) {
2606 u32 dmastart, dmanow;
2607 dmastart = sg_dma_address(host->data->sg);
2608 dmanow = dmastart + host->data->bytes_xfered;
2610 * Force update to the next DMA block boundary.
2612 dmanow = (dmanow &
2613 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2614 SDHCI_DEFAULT_BOUNDARY_SIZE;
2615 host->data->bytes_xfered = dmanow - dmastart;
2616 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2617 " next 0x%08x\n",
2618 mmc_hostname(host->mmc), dmastart,
2619 host->data->bytes_xfered, dmanow);
2620 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
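/*
 * Worked example (illustrative, assuming the default 512 KiB boundary,
 * SDHCI_DEFAULT_BOUNDARY_SIZE = 512 * 1024):
 *
 *	dmastart = 0x10070000;			// sg_dma_address()
 *	dmanow   = dmastart + 0x9000;		// 0x10079000 so far
 *	dmanow   = (0x10079000 & ~0x7ffff) + 0x80000;
 *		 = 0x10000000 + 0x80000 = 0x10080000;
 *
 * so the transfer resumes at the next 512 KiB boundary instead of at
 * whatever address the (possibly faulty) controller reports.
 */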
2623 if (intmask & SDHCI_INT_DATA_END) {
2624 if (host->cmd == host->data_cmd) {
2626 * Data managed to finish before the
2627 * command completed. Make sure we do
2628 * things in the proper order.
2630 host->data_early = 1;
2631 } else {
2632 sdhci_finish_data(host);
2638 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2640 irqreturn_t result = IRQ_NONE;
2641 struct sdhci_host *host = dev_id;
2642 u32 intmask, mask, unexpected = 0;
2643 int max_loops = 16;
2645 spin_lock(&host->lock);
2647 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2648 spin_unlock(&host->lock);
2649 return IRQ_NONE;
2650 }
2652 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2653 if (!intmask || intmask == 0xffffffff) {
2654 result = IRQ_NONE;
2655 goto out;
2656 }
2658 do {
2659 /* Clear selected interrupts. */
2660 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2661 SDHCI_INT_BUS_POWER);
2662 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2664 DBG("*** %s got interrupt: 0x%08x\n",
2665 mmc_hostname(host->mmc), intmask);
2667 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2668 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2669 SDHCI_CARD_PRESENT;
2672 * There is an observation on i.mx esdhc. The INSERT
2673 * bit will be immediately set again when it gets
2674 * cleared, if a card is inserted. We have to mask
2675 * the irq to prevent an interrupt storm which would
2676 * freeze the system. And the REMOVE gets the
2677 * same situation.
2679 * More testing is needed here to ensure it works
2680 * for other platforms though.
2682 host->ier &= ~(SDHCI_INT_CARD_INSERT |
2683 SDHCI_INT_CARD_REMOVE);
2684 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2685 SDHCI_INT_CARD_INSERT;
2686 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2687 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2689 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2690 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2692 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2693 SDHCI_INT_CARD_REMOVE);
2694 result = IRQ_WAKE_THREAD;
2697 if (intmask & SDHCI_INT_CMD_MASK)
2698 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2700 if (intmask & SDHCI_INT_DATA_MASK)
2701 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2703 if (intmask & SDHCI_INT_BUS_POWER)
2704 pr_err("%s: Card is consuming too much power!\n",
2705 mmc_hostname(host->mmc));
2707 if (intmask & SDHCI_INT_RETUNE)
2708 mmc_retune_needed(host->mmc);
2710 if (intmask & SDHCI_INT_CARD_INT) {
2711 sdhci_enable_sdio_irq_nolock(host, false);
2712 host->thread_isr |= SDHCI_INT_CARD_INT;
2713 result = IRQ_WAKE_THREAD;
2716 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2717 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2718 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2719 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2721 if (intmask) {
2722 unexpected |= intmask;
2723 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2724 }
2726 if (result == IRQ_NONE)
2727 result = IRQ_HANDLED;
2729 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2730 } while (intmask && --max_loops);
2731 out:
2732 spin_unlock(&host->lock);
2734 if (unexpected) {
2735 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2736 mmc_hostname(host->mmc), unexpected);
2737 sdhci_dumpregs(host);
2738 }
2740 return result;
2741 }
2743 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2745 struct sdhci_host *host = dev_id;
2746 unsigned long flags;
2747 u32 isr;
2749 spin_lock_irqsave(&host->lock, flags);
2750 isr = host->thread_isr;
2751 host->thread_isr = 0;
2752 spin_unlock_irqrestore(&host->lock, flags);
2754 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2755 struct mmc_host *mmc = host->mmc;
2757 mmc->ops->card_event(mmc);
2758 mmc_detect_change(mmc, msecs_to_jiffies(200));
2761 if (isr & SDHCI_INT_CARD_INT) {
2762 sdio_run_irqs(host->mmc);
2764 spin_lock_irqsave(&host->lock, flags);
2765 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2766 sdhci_enable_sdio_irq_nolock(host, true);
2767 spin_unlock_irqrestore(&host->lock, flags);
2770 return isr ? IRQ_HANDLED : IRQ_NONE;
2773 /*****************************************************************************\
2775 * Suspend/resume *
2777 \*****************************************************************************/
2779 #ifdef CONFIG_PM
2781 * To enable wakeup events, the corresponding events have to be enabled in
2782 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
2783 * Table' in the SD Host Controller Standard Specification.
2784 * It is useless to restore SDHCI_INT_ENABLE state in
2785 * sdhci_disable_irq_wakeups() since it will be set by
2786 * sdhci_enable_card_detection() or sdhci_init().
2788 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2789 {
2790 u8 val;
2791 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2792 | SDHCI_WAKE_ON_INT;
2793 u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2794 SDHCI_INT_CARD_INT;
2796 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2797 val |= mask;
2798 /* Avoid fake wake up */
2799 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
2800 val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2801 irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2803 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2804 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
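/*
 * Typical pairing (sketch, mirroring sdhci_suspend_host() below): when the
 * device may wake the system, this call is combined with enable_irq_wake():
 *
 *	if (device_may_wakeup(mmc_dev(host->mmc))) {
 *		sdhci_enable_irq_wakeups(host);
 *		enable_irq_wake(host->irq);
 *	}
 */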
2806 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2808 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2809 {
2810 u8 val;
2811 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2812 | SDHCI_WAKE_ON_INT;
2814 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2815 val &= ~mask;
2816 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2819 int sdhci_suspend_host(struct sdhci_host *host)
2821 sdhci_disable_card_detection(host);
2823 mmc_retune_timer_stop(host->mmc);
2824 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
2825 mmc_retune_needed(host->mmc);
2827 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2828 host->ier = 0;
2829 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2830 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2831 free_irq(host->irq, host);
2832 } else {
2833 sdhci_enable_irq_wakeups(host);
2834 enable_irq_wake(host->irq);
2835 }
2837 return 0;
2838 }
2839 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2841 int sdhci_resume_host(struct sdhci_host *host)
2843 struct mmc_host *mmc = host->mmc;
2844 int ret = 0;
2846 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2847 if (host->ops->enable_dma)
2848 host->ops->enable_dma(host);
2851 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2852 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2853 /* Card keeps power but host controller does not */
2854 sdhci_init(host, 0);
2855 host->pwr = 0;
2856 host->clock = 0;
2857 mmc->ops->set_ios(mmc, &mmc->ios);
2858 } else {
2859 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2860 mmiowb();
2861 }
2863 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2864 ret = request_threaded_irq(host->irq, sdhci_irq,
2865 sdhci_thread_irq, IRQF_SHARED,
2866 mmc_hostname(host->mmc), host);
2867 if (ret)
2868 return ret;
2869 } else {
2870 sdhci_disable_irq_wakeups(host);
2871 disable_irq_wake(host->irq);
2872 }
2874 sdhci_enable_card_detection(host);
2876 return ret;
2877 }
2879 EXPORT_SYMBOL_GPL(sdhci_resume_host);
2881 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2883 unsigned long flags;
2885 mmc_retune_timer_stop(host->mmc);
2886 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
2887 mmc_retune_needed(host->mmc);
2889 spin_lock_irqsave(&host->lock, flags);
2890 host->ier &= SDHCI_INT_CARD_INT;
2891 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2892 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2893 spin_unlock_irqrestore(&host->lock, flags);
2895 synchronize_hardirq(host->irq);
2897 spin_lock_irqsave(&host->lock, flags);
2898 host->runtime_suspended = true;
2899 spin_unlock_irqrestore(&host->lock, flags);
2901 return 0;
2902 }
2903 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2905 int sdhci_runtime_resume_host(struct sdhci_host *host)
2907 struct mmc_host *mmc = host->mmc;
2908 unsigned long flags;
2909 int host_flags = host->flags;
2911 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2912 if (host->ops->enable_dma)
2913 host->ops->enable_dma(host);
2916 sdhci_init(host, 0);
2918 /* Force clock and power re-program */
2919 host->pwr = 0;
2920 host->clock = 0;
2921 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
2922 mmc->ops->set_ios(mmc, &mmc->ios);
2924 if ((host_flags & SDHCI_PV_ENABLED) &&
2925 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2926 spin_lock_irqsave(&host->lock, flags);
2927 sdhci_enable_preset_value(host, true);
2928 spin_unlock_irqrestore(&host->lock, flags);
2931 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
2932 mmc->ops->hs400_enhanced_strobe)
2933 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
2935 spin_lock_irqsave(&host->lock, flags);
2937 host->runtime_suspended = false;
2939 /* Enable SDIO IRQ */
2940 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2941 sdhci_enable_sdio_irq_nolock(host, true);
2943 /* Enable Card Detection */
2944 sdhci_enable_card_detection(host);
2946 spin_unlock_irqrestore(&host->lock, flags);
2948 return 0;
2949 }
2950 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2952 #endif /* CONFIG_PM */
2954 /*****************************************************************************\
2956 * Device allocation/registration *
2958 \*****************************************************************************/
2960 struct sdhci_host *sdhci_alloc_host(struct device *dev,
2961 size_t priv_size)
2962 {
2963 struct mmc_host *mmc;
2964 struct sdhci_host *host;
2966 WARN_ON(dev == NULL);
2968 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
2969 if (!mmc)
2970 return ERR_PTR(-ENOMEM);
2972 host = mmc_priv(mmc);
2973 host->mmc = mmc;
2974 host->mmc_host_ops = sdhci_ops;
2975 mmc->ops = &host->mmc_host_ops;
2977 host->flags = SDHCI_SIGNALING_330;
2979 return host;
2980 }
2982 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
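/*
 * Example (sketch only; 'struct my_priv' and 'pdev' are hypothetical): a
 * platform glue driver typically embeds its private data after the host:
 *
 *	struct my_priv { void __iomem *extra_regs; };
 *
 *	struct sdhci_host *host;
 *	struct my_priv *priv;
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	priv = sdhci_priv(host);
 */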
2984 static int sdhci_set_dma_mask(struct sdhci_host *host)
2986 struct mmc_host *mmc = host->mmc;
2987 struct device *dev = mmc_dev(mmc);
2988 int ret = -EINVAL;
2990 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
2991 host->flags &= ~SDHCI_USE_64_BIT_DMA;
2993 /* Try 64-bit mask if hardware is capable of it */
2994 if (host->flags & SDHCI_USE_64_BIT_DMA) {
2995 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
2996 if (ret) {
2997 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
2998 mmc_hostname(mmc));
2999 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3000 }
3001 }
3003 /* 32-bit mask as default & fallback */
3004 if (ret) {
3005 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3006 if (ret)
3007 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3008 mmc_hostname(mmc));
3009 }
3011 return ret;
3012 }
3014 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3015 {
3016 u16 v;
3017 u64 dt_caps_mask = 0;
3018 u64 dt_caps = 0;
3020 if (host->read_caps)
3021 return;
3023 host->read_caps = true;
3025 if (debug_quirks)
3026 host->quirks = debug_quirks;
3028 if (debug_quirks2)
3029 host->quirks2 = debug_quirks2;
3031 sdhci_do_reset(host, SDHCI_RESET_ALL);
3033 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3034 "sdhci-caps-mask", &dt_caps_mask);
3035 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3036 "sdhci-caps", &dt_caps);
3038 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3039 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3041 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3042 return;
3044 if (caps) {
3045 host->caps = *caps;
3046 } else {
3047 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3048 host->caps &= ~lower_32_bits(dt_caps_mask);
3049 host->caps |= lower_32_bits(dt_caps);
3050 }
3052 if (host->version < SDHCI_SPEC_300)
3053 return;
3055 if (caps1) {
3056 host->caps1 = *caps1;
3057 } else {
3058 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3059 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3060 host->caps1 |= upper_32_bits(dt_caps);
3061 }
3062 }
3063 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3065 int sdhci_setup_host(struct sdhci_host *host)
3067 struct mmc_host *mmc;
3068 u32 max_current_caps;
3069 unsigned int ocr_avail;
3070 unsigned int override_timeout_clk;
3071 u32 max_clk;
3072 int ret;
3074 WARN_ON(host == NULL);
3075 if (host == NULL)
3076 return -EINVAL;
3078 mmc = host->mmc;
3081 * If there are external regulators, get them. Note this must be done
3082 * early before resetting the host and reading the capabilities so that
3083 * the host can take the appropriate action if regulators are not
3084 * available.
3086 ret = mmc_regulator_get_supply(mmc);
3087 if (ret == -EPROBE_DEFER)
3088 return ret;
3090 sdhci_read_caps(host);
3092 override_timeout_clk = host->timeout_clk;
3094 if (host->version > SDHCI_SPEC_300) {
3095 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3096 mmc_hostname(mmc), host->version);
3099 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3100 host->flags |= SDHCI_USE_SDMA;
3101 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3102 DBG("Controller doesn't have SDMA capability\n");
3103 else
3104 host->flags |= SDHCI_USE_SDMA;
3106 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3107 (host->flags & SDHCI_USE_SDMA)) {
3108 DBG("Disabling DMA as it is marked broken\n");
3109 host->flags &= ~SDHCI_USE_SDMA;
3112 if ((host->version >= SDHCI_SPEC_200) &&
3113 (host->caps & SDHCI_CAN_DO_ADMA2))
3114 host->flags |= SDHCI_USE_ADMA;
3116 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3117 (host->flags & SDHCI_USE_ADMA)) {
3118 DBG("Disabling ADMA as it is marked broken\n");
3119 host->flags &= ~SDHCI_USE_ADMA;
3123 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3124 * and *must* do 64-bit DMA. A driver has the opportunity to change
3125 * that during the first call to ->enable_dma(). Similarly
3126 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3127 * implement.
3129 if (host->caps & SDHCI_CAN_64BIT)
3130 host->flags |= SDHCI_USE_64_BIT_DMA;
3132 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3133 ret = sdhci_set_dma_mask(host);
3135 if (!ret && host->ops->enable_dma)
3136 ret = host->ops->enable_dma(host);
3138 if (ret) {
3139 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3140 mmc_hostname(mmc));
3141 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3143 ret = 0;
3144 }
3145 }
3147 /* SDMA does not support 64-bit DMA */
3148 if (host->flags & SDHCI_USE_64_BIT_DMA)
3149 host->flags &= ~SDHCI_USE_SDMA;
3151 if (host->flags & SDHCI_USE_ADMA) {
3152 dma_addr_t dma;
3153 void *buf;
3156 * The DMA descriptor table size is calculated as the maximum
3157 * number of segments times 2, to allow for an alignment
3158 * descriptor for each segment, plus 1 for a nop end descriptor,
3159 * all multiplied by the descriptor size.
3161 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3162 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3163 SDHCI_ADMA2_64_DESC_SZ;
3164 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3165 } else {
3166 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3167 SDHCI_ADMA2_32_DESC_SZ;
3168 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3171 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3172 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3173 host->adma_table_sz, &dma, GFP_KERNEL);
3174 if (!buf) {
3175 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3176 mmc_hostname(mmc));
3177 host->flags &= ~SDHCI_USE_ADMA;
3178 } else if ((dma + host->align_buffer_sz) &
3179 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3180 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3181 mmc_hostname(mmc));
3182 host->flags &= ~SDHCI_USE_ADMA;
3183 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3184 host->adma_table_sz, buf, dma);
3185 } else {
3186 host->align_buffer = buf;
3187 host->align_addr = dma;
3189 host->adma_table = buf + host->align_buffer_sz;
3190 host->adma_addr = dma + host->align_buffer_sz;
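/*
 * Worked example (illustrative; assumes SDHCI_MAX_SEGS = 128,
 * SDHCI_ADMA2_64_DESC_SZ = 12, SDHCI_ADMA2_32_DESC_SZ = 8 and
 * SDHCI_ADMA2_ALIGN = 4, as defined in sdhci.h for this version):
 *
 *	64-bit: adma_table_sz = (128 * 2 + 1) * 12 = 3084 bytes
 *	32-bit: adma_table_sz = (128 * 2 + 1) * 8  = 2056 bytes
 *	align_buffer_sz = 128 * 4 = 512 bytes
 *
 * so a single coherent allocation of align_buffer_sz + adma_table_sz
 * covers both the alignment bounce buffers and the descriptor table.
 */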
3195 * If we use DMA, then it's up to the caller to set the DMA
3196 * mask, but PIO does not need the hw shim so we set a new
3197 * mask here in that case.
3199 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3200 host->dma_mask = DMA_BIT_MASK(64);
3201 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3204 if (host->version >= SDHCI_SPEC_300)
3205 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3206 >> SDHCI_CLOCK_BASE_SHIFT;
3207 else
3208 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3209 >> SDHCI_CLOCK_BASE_SHIFT;
3211 host->max_clk *= 1000000;
3212 if (host->max_clk == 0 || host->quirks &
3213 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3214 if (!host->ops->get_max_clock) {
3215 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3216 mmc_hostname(mmc));
3217 ret = -ENODEV;
3218 goto undma;
3219 }
3220 host->max_clk = host->ops->get_max_clock(host);
3224 * In case of Host Controller v3.00, find out whether clock
3225 * multiplier is supported.
3227 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3228 SDHCI_CLOCK_MUL_SHIFT;
3231 * In case the value in Clock Multiplier is 0, then programmable
3232 * clock mode is not supported, otherwise the actual clock
3233 * multiplier is one more than the value of Clock Multiplier
3234 * in the Capabilities Register.
3236 if (host->clk_mul)
3237 host->clk_mul += 1;
3240 * Set host parameters.
3242 max_clk = host->max_clk;
3244 if (host->ops->get_min_clock)
3245 mmc->f_min = host->ops->get_min_clock(host);
3246 else if (host->version >= SDHCI_SPEC_300) {
3247 if (host->clk_mul) {
3248 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3249 max_clk = host->max_clk * host->clk_mul;
3250 } else
3251 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3252 } else
3253 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3255 if (!mmc->f_max || mmc->f_max > max_clk)
3256 mmc->f_max = max_clk;
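/*
 * Worked example (illustrative numbers only): with a 200 MHz base clock
 * and a Clock Multiplier field of 3 (so clk_mul = 4 after the +1 above),
 * programmable clock mode gives
 *
 *	f_min = (200 MHz * 4) / 1024 ≈ 781 kHz
 *	f_max = 200 MHz * 4 = 800 MHz (unless capped by a preset mmc->f_max)
 *
 * whereas without a multiplier, f_min = 200 MHz / SDHCI_MAX_DIV_SPEC_300
 * (a divisor of 2046) ≈ 97.8 kHz.
 */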
3258 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3259 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3260 SDHCI_TIMEOUT_CLK_SHIFT;
3261 if (host->timeout_clk == 0) {
3262 if (host->ops->get_timeout_clock) {
3263 host->timeout_clk =
3264 host->ops->get_timeout_clock(host);
3265 } else {
3266 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3267 mmc_hostname(mmc));
3268 ret = -ENODEV;
3269 goto undma;
3270 }
3271 }
3273 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3274 host->timeout_clk *= 1000;
3276 if (override_timeout_clk)
3277 host->timeout_clk = override_timeout_clk;
3279 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3280 host->ops->get_max_timeout_count(host) : 1 << 27;
3281 mmc->max_busy_timeout /= host->timeout_clk;
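/*
 * Worked example (illustrative): timeout_clk is kept in kHz, so with a
 * 48 MHz timeout clock (TIMEOUT_CLK_UNIT set, field value 48 scaled by
 * 1000 above) and the default maximum count of 1 << 27 clocks:
 *
 *	max_busy_timeout = (1 << 27) / 48000 ≈ 2796 ms
 *
 * i.e. roughly a 2.8 second upper bound on busy waiting.
 */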
3284 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3285 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3287 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3288 host->flags |= SDHCI_AUTO_CMD12;
3290 /* Auto-CMD23 stuff only works in ADMA or PIO. */
3291 if ((host->version >= SDHCI_SPEC_300) &&
3292 ((host->flags & SDHCI_USE_ADMA) ||
3293 !(host->flags & SDHCI_USE_SDMA)) &&
3294 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3295 host->flags |= SDHCI_AUTO_CMD23;
3296 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
3298 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
3302 * A controller may support 8-bit width, but the board itself
3303 * might not have the pins brought out. Boards that support
3304 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3305 * their platform code before calling sdhci_add_host(), and we
3306 * won't assume 8-bit width for hosts without that CAP.
3308 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3309 mmc->caps |= MMC_CAP_4_BIT_DATA;
3311 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3312 mmc->caps &= ~MMC_CAP_CMD23;
3314 if (host->caps & SDHCI_CAN_DO_HISPD)
3315 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3317 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3318 mmc_card_is_removable(mmc) &&
3319 mmc_gpio_get_cd(host->mmc) < 0)
3320 mmc->caps |= MMC_CAP_NEEDS_POLL;
3322 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3323 if (!IS_ERR(mmc->supply.vqmmc)) {
3324 ret = regulator_enable(mmc->supply.vqmmc);
3325 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3326 1950000))
3327 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3328 SDHCI_SUPPORT_SDR50 |
3329 SDHCI_SUPPORT_DDR50);
3330 if (ret) {
3331 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3332 mmc_hostname(mmc), ret);
3333 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3337 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3338 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3339 SDHCI_SUPPORT_DDR50);
3342 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3343 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3344 SDHCI_SUPPORT_DDR50))
3345 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3347 /* SDR104 support also implies SDR50 support */
3348 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3349 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3350 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3351 * field can be promoted to support HS200.
3353 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3354 mmc->caps2 |= MMC_CAP2_HS200;
3355 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3356 mmc->caps |= MMC_CAP_UHS_SDR50;
3359 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3360 (host->caps1 & SDHCI_SUPPORT_HS400))
3361 mmc->caps2 |= MMC_CAP2_HS400;
3363 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3364 (IS_ERR(mmc->supply.vqmmc) ||
3365 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3366 1300000)))
3367 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3369 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3370 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3371 mmc->caps |= MMC_CAP_UHS_DDR50;
3373 /* Does the host need tuning for SDR50? */
3374 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3375 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3377 /* Driver Type(s) (A, C, D) supported by the host */
3378 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3379 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3380 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3381 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3382 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3383 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3385 /* Initial value for re-tuning timer count */
3386 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3387 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3390 * In case Re-tuning Timer is not disabled, the actual value of
3391 * re-tuning timer will be 2 ^ (n - 1).
3393 if (host->tuning_count)
3394 host->tuning_count = 1 << (host->tuning_count - 1);
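/*
 * Worked example (illustrative): a Re-Tuning Timer Count field of 4
 * yields 1 << (4 - 1) = 8, i.e. re-tuning every 8 seconds; a field of 0
 * leaves tuning_count at 0, which disables the timer entirely.
 */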
3396 /* Re-tuning mode supported by the Host Controller */
3397 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3398 SDHCI_RETUNING_MODE_SHIFT;
3400 ocr_avail = 0;
3403 * According to SD Host Controller spec v3.00, if the Host System
3404 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3405 * the value is meaningful only if Voltage Support in the Capabilities
3406 * register is set. The actual current value is 4 times the register
3407 * value.
3409 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3410 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3411 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3412 if (curr > 0) {
3414 /* convert to SDHCI_MAX_CURRENT format */
3415 curr = curr/1000; /* convert to mA */
3416 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3418 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3419 max_current_caps =
3420 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3421 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3422 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
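/*
 * Worked example (illustrative): a regulator limit of 800000 uA becomes
 * 800 mA after the /1000, then 200 register units after dividing by
 * SDHCI_MAX_CURRENT_MULTIPLIER (4 mA per unit); reading it back the
 * other way, a Maximum Current field of 200 advertises 200 * 4 = 800 mA.
 */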
3426 if (host->caps & SDHCI_CAN_VDD_330) {
3427 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3429 mmc->max_current_330 = ((max_current_caps &
3430 SDHCI_MAX_CURRENT_330_MASK) >>
3431 SDHCI_MAX_CURRENT_330_SHIFT) *
3432 SDHCI_MAX_CURRENT_MULTIPLIER;
3434 if (host->caps & SDHCI_CAN_VDD_300) {
3435 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3437 mmc->max_current_300 = ((max_current_caps &
3438 SDHCI_MAX_CURRENT_300_MASK) >>
3439 SDHCI_MAX_CURRENT_300_SHIFT) *
3440 SDHCI_MAX_CURRENT_MULTIPLIER;
3442 if (host->caps & SDHCI_CAN_VDD_180) {
3443 ocr_avail |= MMC_VDD_165_195;
3445 mmc->max_current_180 = ((max_current_caps &
3446 SDHCI_MAX_CURRENT_180_MASK) >>
3447 SDHCI_MAX_CURRENT_180_SHIFT) *
3448 SDHCI_MAX_CURRENT_MULTIPLIER;
3451 /* If OCR set by host, use it instead. */
3452 if (host->ocr_mask)
3453 ocr_avail = host->ocr_mask;
3455 /* If OCR set by external regulators, give it highest prio. */
3456 if (mmc->ocr_avail)
3457 ocr_avail = mmc->ocr_avail;
3459 mmc->ocr_avail = ocr_avail;
3460 mmc->ocr_avail_sdio = ocr_avail;
3461 if (host->ocr_avail_sdio)
3462 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3463 mmc->ocr_avail_sd = ocr_avail;
3464 if (host->ocr_avail_sd)
3465 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3466 else /* normal SD controllers don't support 1.8V */
3467 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3468 mmc->ocr_avail_mmc = ocr_avail;
3469 if (host->ocr_avail_mmc)
3470 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3472 if (mmc->ocr_avail == 0) {
3473 pr_err("%s: Hardware doesn't report any supported voltages.\n",
3474 mmc_hostname(mmc));
3475 ret = -ENODEV;
3476 goto unreg;
3477 }
3479 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3480 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3481 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3482 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3483 host->flags |= SDHCI_SIGNALING_180;
3485 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3486 host->flags |= SDHCI_SIGNALING_120;
3488 spin_lock_init(&host->lock);
3491 * Maximum number of segments. Depends on if the hardware
3492 * can do scatter/gather or not.
3494 if (host->flags & SDHCI_USE_ADMA)
3495 mmc->max_segs = SDHCI_MAX_SEGS;
3496 else if (host->flags & SDHCI_USE_SDMA)
3497 mmc->max_segs = 1;
3498 else /* PIO */
3499 mmc->max_segs = SDHCI_MAX_SEGS;
3502 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3503 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
3504 * is less anyway.
3506 mmc->max_req_size = 524288;
3509 * Maximum segment size. Could be one segment with the maximum number
3510 * of bytes. When doing hardware scatter/gather, each entry cannot
3511 * be larger than 64 KiB though.
3513 if (host->flags & SDHCI_USE_ADMA) {
3514 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3515 mmc->max_seg_size = 65535;
3516 else
3517 mmc->max_seg_size = 65536;
3518 } else {
3519 mmc->max_seg_size = mmc->max_req_size;
3520 }
3523 * Maximum block size. This varies from controller to controller and
3524 * is specified in the capabilities register.
3526 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3527 mmc->max_blk_size = 2;
3528 } else {
3529 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3530 SDHCI_MAX_BLOCK_SHIFT;
3531 if (mmc->max_blk_size >= 3) {
3532 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3533 mmc_hostname(mmc));
3534 mmc->max_blk_size = 0;
3538 mmc->max_blk_size = 512 << mmc->max_blk_size;
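/*
 * Worked example (illustrative): a Max Block Length capability field of
 * 2 gives 512 << 2 = 2048-byte blocks; field values of 3 and above are
 * reserved, hence the fall-back to 0 (512 bytes) above.
 */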
3541 * Maximum block count.
3543 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3545 return 0;
3547 unreg:
3548 if (!IS_ERR(mmc->supply.vqmmc))
3549 regulator_disable(mmc->supply.vqmmc);
3550 undma:
3551 if (host->align_buffer)
3552 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3553 host->adma_table_sz, host->align_buffer,
3554 host->align_addr);
3555 host->adma_table = NULL;
3556 host->align_buffer = NULL;
3558 return ret;
3559 }
3560 EXPORT_SYMBOL_GPL(sdhci_setup_host);
3562 int __sdhci_add_host(struct sdhci_host *host)
3564 struct mmc_host *mmc = host->mmc;
3565 int ret;
3570 tasklet_init(&host->finish_tasklet,
3571 sdhci_tasklet_finish, (unsigned long)host);
3573 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3574 setup_timer(&host->data_timer, sdhci_timeout_data_timer,
3575 (unsigned long)host);
3577 init_waitqueue_head(&host->buf_ready_int);
3579 sdhci_init(host, 0);
3581 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3582 IRQF_SHARED, mmc_hostname(mmc), host);
3583 if (ret) {
3584 pr_err("%s: Failed to request IRQ %d: %d\n",
3585 mmc_hostname(mmc), host->irq, ret);
3586 goto untasklet;
3587 }
3589 #ifdef CONFIG_MMC_DEBUG
3590 sdhci_dumpregs(host);
3591 #endif
3593 ret = sdhci_led_register(host);
3594 if (ret) {
3595 pr_err("%s: Failed to register LED device: %d\n",
3596 mmc_hostname(mmc), ret);
3597 goto unirq;
3598 }
3600 mmiowb();
3602 ret = mmc_add_host(mmc);
3603 if (ret)
3604 goto unled;
3606 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3607 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3608 (host->flags & SDHCI_USE_ADMA) ?
3609 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3610 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3612 sdhci_enable_card_detection(host);
3614 return 0;
3616 unled:
3617 sdhci_led_unregister(host);
3618 unirq:
3619 sdhci_do_reset(host, SDHCI_RESET_ALL);
3620 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3621 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3622 free_irq(host->irq, host);
3623 untasklet:
3624 tasklet_kill(&host->finish_tasklet);
3626 if (!IS_ERR(mmc->supply.vqmmc))
3627 regulator_disable(mmc->supply.vqmmc);
3629 if (host->align_buffer)
3630 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3631 host->adma_table_sz, host->align_buffer,
3632 host->align_addr);
3633 host->adma_table = NULL;
3634 host->align_buffer = NULL;
3636 return ret;
3637 }
3638 EXPORT_SYMBOL_GPL(__sdhci_add_host);
3640 int sdhci_add_host(struct sdhci_host *host)
3641 {
3642 int ret;
3644 ret = sdhci_setup_host(host);
3645 if (ret)
3646 return ret;
3648 return __sdhci_add_host(host);
3649 }
3650 EXPORT_SYMBOL_GPL(sdhci_add_host);
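/*
 * Usage note (sketch; 'my_fixup_caps' is hypothetical): drivers that must
 * adjust capabilities between probing and registration can open-code the
 * two halves of sdhci_add_host():
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *	my_fixup_caps(host);	// e.g. tweak mmc->caps / host->flags
 *	ret = __sdhci_add_host(host);
 */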
3652 void sdhci_remove_host(struct sdhci_host *host, int dead)
3654 struct mmc_host *mmc = host->mmc;
3655 unsigned long flags;
3657 if (dead) {
3658 spin_lock_irqsave(&host->lock, flags);
3660 host->flags |= SDHCI_DEVICE_DEAD;
3662 if (sdhci_has_requests(host)) {
3663 pr_err("%s: Controller removed during transfer!\n",
3664 mmc_hostname(mmc));
3665 sdhci_error_out_mrqs(host, -ENOMEDIUM);
3666 }
3668 spin_unlock_irqrestore(&host->lock, flags);
3669 }
3671 sdhci_disable_card_detection(host);
3673 mmc_remove_host(mmc);
3675 sdhci_led_unregister(host);
3677 if (!dead)
3678 sdhci_do_reset(host, SDHCI_RESET_ALL);
3680 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3681 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3682 free_irq(host->irq, host);
3684 del_timer_sync(&host->timer);
3685 del_timer_sync(&host->data_timer);
3687 tasklet_kill(&host->finish_tasklet);
3689 if (!IS_ERR(mmc->supply.vqmmc))
3690 regulator_disable(mmc->supply.vqmmc);
3692 if (host->align_buffer)
3693 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3694 host->adma_table_sz, host->align_buffer,
3695 host->align_addr);
3697 host->adma_table = NULL;
3698 host->align_buffer = NULL;
3701 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3703 void sdhci_free_host(struct sdhci_host *host)
3705 mmc_free_host(host->mmc);
3708 EXPORT_SYMBOL_GPL(sdhci_free_host);
3710 /*****************************************************************************\
3712 * Driver init/exit *
3714 \*****************************************************************************/
3716 static int __init sdhci_drv_init(void)
3717 {
3718 pr_info(DRIVER_NAME
3719 ": Secure Digital Host Controller Interface driver\n");
3720 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3722 return 0;
3723 }
3725 static void __exit sdhci_drv_exit(void)
3726 {
3727 }
3729 module_init(sdhci_drv_init);
3730 module_exit(sdhci_drv_exit);
3732 module_param(debug_quirks, uint, 0444);
3733 module_param(debug_quirks2, uint, 0444);
3735 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3736 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3737 MODULE_LICENSE("GPL");
3739 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3740 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");