/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include <asm/div64.h>

#include "mmci.h"
#include "mmci_qcom_dml.h"
#define DRIVER_NAME "mmci-pl18x"

static void mmci_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);

static unsigned int fmax = 515633;
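
/*
 * Each variant_data below captures the register-level quirks of one
 * incarnation of the PL180/PL181 block (ARM, ST Micro, STM32, Qualcomm);
 * the individual fields are documented in mmci.h.
 */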
static struct variant_data variant_arm = {
	.fifohalfsize = 8 * 4,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.pwrreg_powerup = MCI_PWR_UP,
	.reversed_irq_handling = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifohalfsize = 64 * 4,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.pwrreg_powerup = MCI_PWR_UP,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifohalfsize = 64 * 4,
	.clkreg_enable = MCI_ARM_HWFCEN,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.pwrreg_powerup = MCI_PWR_UP,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_ROD,
	.init = mmci_variant_init,
};

static struct variant_data variant_u300 = {
	.fifohalfsize = 8 * 4,
	.clkreg_enable = MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_nomadik = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_ux500 = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag = MCI_ST_CARDBUSY,
	.busy_detect_mask = MCI_ST_BUSYENDMASK,
	.pwrreg_nopower = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_ux500v2 = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag = MCI_ST_CARDBUSY,
	.busy_detect_mask = MCI_ST_BUSYENDMASK,
	.pwrreg_nopower = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = ux500v2_variant_init,
};

static struct variant_data variant_stm32 = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
	.init = mmci_variant_init,
};

static struct variant_data variant_stm32_sdmmc = {
	.fifohalfsize = 8 * 4,
	.stm32_clkdiv = true,
	.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
	.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
	.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
	.cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
	.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
	.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
	.datactrl_first = true,
	.datacnt_useless = true,
	.datalength_bits = 25,
	.datactrl_blocksz = 14,
	.stm32_idmabsize_mask = GENMASK(12, 5),
	.init = sdmmc_variant_init,
};

static struct variant_data variant_qcom = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_QCOM_CLK_FLOWENA |
			 MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.pwrreg_powerup = MCI_PWR_UP,
	.explicit_mclk_control = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_ROD,
	.init = qcom_variant_init,
};
/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}
static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}
/*
 * This must be called with host->lock held
 */
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}
/*
 * This must be called with host->lock held
 */
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
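
		/*
		 * Worked example for the PL180 formula above: with
		 * mclk = 100 MHz and desired = 400 kHz, clkdiv becomes
		 * 100000000 / (2 * 400000) - 1 = 124, and cclk comes out
		 * as 100000000 / (2 * (124 + 1)) = 400 kHz exactly.
		 */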
		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}
void mmci_dma_release(struct mmci_host *host)
{
	if (host->ops && host->ops->dma_release)
		host->ops->dma_release(host);

	host->use_dma = false;
}

void mmci_dma_setup(struct mmci_host *host)
{
	if (!host->ops || !host->ops->dma_setup)
		return;

	if (host->ops->dma_setup(host))
		return;

	/* initialize pre request cookie */
	host->next_cookie = 1;

	host->use_dma = true;
}
/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	if (host->ops && host->ops->validate_data)
		return host->ops->validate_data(host, data);

	return 0;
}
int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
	int err;

	if (!host->ops || !host->ops->prep_data)
		return 0;

	err = host->ops->prep_data(host, data, next);
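
	/*
	 * A host_cookie of 0 means "not prepared" (mmci_unprep_data()
	 * resets it to 0), so the conditional below wraps the cookie
	 * back to 1 instead of letting the signed counter go negative.
	 */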
	if (next && !err)
		data->host_cookie = ++host->next_cookie < 0 ?
			1 : host->next_cookie;

	return err;
}
void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
		      int err)
{
	if (host->ops && host->ops->unprep_data)
		host->ops->unprep_data(host, data, err);

	data->host_cookie = 0;
}
void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);

	if (host->ops && host->ops->get_next_data)
		host->ops->get_next_data(host, data);
}
int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
	struct mmc_data *data = host->data;
	int ret;

	if (!host->use_dma)
		return -EINVAL;

	ret = mmci_prep_data(host, data, false);
	if (ret)
		return ret;

	if (!host->ops || !host->ops->dma_start)
		return -EINVAL;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);

	host->ops->dma_start(host, &datactrl);

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);

	return 0;
}
void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_finalize)
		host->ops->dma_finalize(host, data);
}

void mmci_dma_error(struct mmci_host *host)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_error)
		host->ops->dma_error(host);
}
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~variant->irq_pio_mask;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}
static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
}

static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}
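
/*
 * Note the difference between the two helpers above: most variants
 * program the DPSM with the log2 of the block size (mmci_dctrl_blksz()),
 * while the ux500v2 incarnation takes the literal block size shifted
 * into the upper half of the data control register.
 */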
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
struct mmci_dmae_next {
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
};

struct mmci_dmae_priv {
	struct dma_chan *cur;
	struct dma_chan *rx_channel;
	struct dma_chan *tx_channel;
	struct dma_async_tx_descriptor *desc_current;
	struct mmci_dmae_next next_data;
};
int mmci_dmae_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	struct mmci_dmae_priv *dmae;

	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
	if (!dmae)
		return -ENOMEM;

	host->dma_priv = dmae;

	dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "rx");
	dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "tx");

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (dmae->rx_channel && !dmae->tx_channel)
		dmae->tx_channel = dmae->rx_channel;

	if (dmae->rx_channel)
		rxname = dma_chan_name(dmae->rx_channel);
	else
		rxname = "none";

	if (dmae->tx_channel)
		txname = dma_chan_name(dmae->tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (dmae->tx_channel) {
		struct device *dev = dmae->tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (dmae->rx_channel) {
		struct device *dev = dmae->rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (!dmae->tx_channel || !dmae->rx_channel) {
		mmci_dmae_release(host);
		return -EINVAL;
	}

	return 0;
}
/*
 * Release the DMA channels. This is also called on the error path of
 * mmci_dmae_setup(), so it must cope with channels that were never
 * successfully requested.
 */
void mmci_dmae_release(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (dmae->rx_channel)
		dma_release_channel(dmae->rx_channel);
	if (dmae->tx_channel)
		dma_release_channel(dmae->tx_channel);
	dmae->rx_channel = dmae->tx_channel = NULL;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct dma_chan *chan;

	if (data->flags & MMC_DATA_READ)
		chan = dmae->rx_channel;
	else
		chan = dmae->tx_channel;

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}
void mmci_dmae_error(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!dma_inprogress(host))
		return;

	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(dmae->cur);
	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
	host->data->host_cookie = 0;

	mmci_dma_unmap(host, host->data);
}
void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	u32 status;
	int i;

	if (!dma_inprogress(host))
		return;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_error(host);
		if (!data->error)
			data->error = -EIO;
	} else if (!data->host_cookie) {
		mmci_dma_unmap(host, data);
	}

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
}
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
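	/*
	 * Both the src_* and dst_* halves of the slave config are filled
	 * in above; the DMA engine driver is expected to honour only the
	 * half matching conf.direction, which is chosen below.
	 */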
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = dmae->rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = dmae->tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}
int mmci_dmae_prep_data(struct mmci_host *host,
			struct mmc_data *data,
			bool next)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *nd = &dmae->next_data;

	if (!host->use_dma)
		return -EINVAL;

	if (next)
		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
	/* Check if next job is already prepared. */
	if (dmae->cur && dmae->desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return _mmci_dmae_prep_data(host, data, &dmae->cur,
				    &dmae->desc_current);
}
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	host->dma_in_progress = true;
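	/*
	 * dmaengine two-step: dmaengine_submit() only queues the prepared
	 * descriptor; dma_async_issue_pending() is what actually starts
	 * the channel.
	 */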
	dmaengine_submit(dmae->desc_current);
	dma_async_issue_pending(dmae->cur);

	*datactrl |= MCI_DPSM_DMAENABLE;

	return 0;
}
void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *next = &dmae->next_data;

	if (!host->use_dma)
		return;

	WARN_ON(!data->host_cookie && (next->desc || next->chan));

	dmae->desc_current = next->desc;
	dmae->cur = next->chan;
	next->desc = NULL;
	next->chan = NULL;
}
void mmci_dmae_unprep_data(struct mmci_host *host,
			   struct mmc_data *data, int err)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!host->use_dma)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_dmae_next *next = &dmae->next_data;
		struct dma_chan *chan;

		if (data->flags & MMC_DATA_READ)
			chan = dmae->rx_channel;
		else
			chan = dmae->tx_channel;
		dmaengine_terminate_all(chan);

		if (dmae->desc_current == next->desc)
			dmae->desc_current = NULL;

		if (dmae->cur == next->chan) {
			host->dma_in_progress = false;
			dmae->cur = NULL;
		}

		next->desc = NULL;
		next->chan = NULL;
	}
}
static struct mmci_host_ops mmci_variant_ops = {
	.prep_data = mmci_dmae_prep_data,
	.unprep_data = mmci_dmae_unprep_data,
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
	.get_next_data = mmci_dmae_get_next_data,
	.dma_setup = mmci_dmae_setup,
	.dma_release = mmci_dmae_release,
	.dma_start = mmci_dmae_start,
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};
#else
static struct mmci_host_ops mmci_variant_ops = {
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
};
#endif
void mmci_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
}

void ux500v2_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
	host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	WARN_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	mmci_prep_data(host, data, true);
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_unprep_data(host, data, err);
}
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	datactrl = host->ops->get_datactrl_cfg(host);
	datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}
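
	/*
	 * MCI_DATAEND stays masked below for the PIO case; mmci_pio_irq()
	 * re-enables it only once host->size reaches zero, so the data end
	 * IRQ cannot race with the PIO drain loop.
	 */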
	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	if (host->variant->cmdreg_stop &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		c |= host->variant->cmdreg_stop;

	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= host->variant->cmdreg_lrsp_crc;
		else if (cmd->flags & MMC_RSP_CRC)
			c |= host->variant->cmdreg_srsp_crc;
		else
			c |= host->variant->cmdreg_srsp;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
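
/*
 * Send the stop/abort command prepared in mmci_probe() (a CMD12); on
 * variants with cmdreg_stop this is what clears the DPSM after a data
 * error.
 */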
static void mmci_stop_command(struct mmci_host *host)
{
	host->stop_abort.error = 0;
	mmci_start_command(host, &host->stop_abort, 0);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	unsigned int status_err;

	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	status_err = status & (host->variant->start_err |
			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
			       MCI_TXUNDERRUN | MCI_RXOVERRUN);

	if (status_err) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		mmci_dma_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		if (!host->variant->datacnt_useless) {
			remain = readl(host->base + MMCIDATACNT);
			success = data->blksz * data->blocks - remain;
		} else {
			success = 0;
		}

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status_err, success);
		if (status_err & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status_err & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status_err & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status_err & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status_err & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		mmci_dma_finalize(host, data);

		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			if (host->variant->cmdreg_stop && data->error)
				mmci_stop_command(host);
			else
				mmci_request_end(host, data->mrq);
		} else if (host->mrq->sbc && !data->error) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc, busy_resp;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);
	busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

	/*
	 * One of these interrupts must be raised for the command to be
	 * considered worth handling. Note that we tag on any latent IRQs
	 * postponed due to waiting for busy status.
	 */
	if (!((status | host->busy_status) &
	      (MCI_CMDCRCFAIL | MCI_CMDTIMEOUT | MCI_CMDSENT | MCI_CMDRESPEND)))
		return;

	/*
	 * ST Micro variant: handle busy detection.
	 */
	if (busy_resp && host->variant->busy_detect) {

		/* We are busy with a command, return */
		if (host->busy_status &&
		    (status & host->variant->busy_detect_flag))
			return;

		/*
		 * We were not busy, but we now got a busy response on
		 * something that was not an error, and we double-check
		 * that the special busy status bit is still set before
		 * proceeding.
		 */
		if (!host->busy_status &&
		    !(status & (MCI_CMDCRCFAIL | MCI_CMDTIMEOUT)) &&
		    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {

			/* Clear the busy start IRQ */
			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			/* Unmask the busy end IRQ */
			writel(readl(base + MMCIMASK0) |
			       host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			/*
			 * Now cache the last response status code (until
			 * the busy bit goes low), and return.
			 */
			host->busy_status =
				status & (MCI_CMDSENT | MCI_CMDRESPEND);
			return;
		}

		/*
		 * At this point we are not busy with a command, we have
		 * not received a new busy request, clear and mask the busy
		 * end IRQ and fall through to process the IRQ.
		 */
		if (host->busy_status) {

			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			writel(readl(base + MMCIMASK0) &
			       ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			host->busy_status = 0;
		}
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			mmci_dma_error(host);

			mmci_stop_data(host);
			if (host->variant->cmdreg_stop && cmd->error) {
				mmci_stop_command(host);
				return;
			}
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!host->variant->datactrl_first &&
		   !(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}

static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * on qcom SDCC4 only 8 words are used in each burst so only 8 addresses
	 * from the fifo range should be used
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];

				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~host->variant->irq_pio_mask;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
		 * enabled) in mmci_cmd_irq() function where ST Micro busy
		 * detection variant is handled. Considering the HW seems to be
		 * triggering the IRQ on both edges while monitoring DAT0 for
		 * busy completion and that same status bit is used to monitor
		 * start and end of busy detection, special care must be taken
		 * to make sure that both start and end interrupts are always
		 * cleared one after the other.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Busy detection has been handled by mmci_cmd_irq() above.
		 * Clear the status bit to prevent polling in IRQ context.
		 */
		if (host->variant->busy_detect_flag)
			status &= ~host->variant->busy_detect_flag;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data &&
	    (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (variant->opendrain) {
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pwr |= variant->opendrain;
	} else {
		/*
		 * If the variant cannot configure the pads on its own, then we
		 * expect the pinctrl to be able to do that for us
		 */
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	if (host->ops && host->ops->set_clkreg)
		host->ops->set_clkreg(host, ios->clock);
	else
		mmci_set_clkreg(host, ios->clock);

	if (host->ops && host->ops->set_pwrreg)
		host->ops->set_pwrreg(host, pwr);
	else
		mmci_write_pwrreg(host, pwr);

	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
	}

	return ret;
}
static struct mmc_host_ops mmci_ops = {
	.request = mmci_request,
	.pre_req = mmci_pre_request,
	.post_req = mmci_post_request,
	.set_ios = mmci_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;
	if (of_get_property(np, "st,sig-dir", NULL))
		host->pwr_reg_add |= MCI_STM32_DIRPOL;
	if (of_get_property(np, "st,neg-edge", NULL))
		host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
	if (of_get_property(np, "st,use-ckin", NULL))
		host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}
static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/*
	 * Some variant (STM32) doesn't have opendrain bit, nevertheless
	 * pins can be set accordingly using pinctrl
	 */
	if (!variant->opendrain) {
		host->pinctrl = devm_pinctrl_get(&dev->dev);
		if (IS_ERR(host->pinctrl)) {
			dev_err(&dev->dev, "failed to get pinctrl");
			ret = PTR_ERR(host->pinctrl);
			goto host_free;
		}

		host->pins_default = pinctrl_lookup_state(host->pinctrl,
							  PINCTRL_STATE_DEFAULT);
		if (IS_ERR(host->pins_default)) {
			dev_err(mmc_dev(mmc), "Can't select default pins\n");
			ret = PTR_ERR(host->pins_default);
			goto host_free;
		}

		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
							    MMCI_PINCTRL_STATE_OPENDRAIN);
		if (IS_ERR(host->pins_opendrain)) {
			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
			ret = PTR_ERR(host->pins_opendrain);
			goto host_free;
		}
	}

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	if (variant->init)
		variant->init(host);

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 * On Qualcomm-like controllers, get the nearest minimum clock to 100 kHz.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->stm32_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
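
	/*
	 * The divisors above follow from the divider formulas in
	 * mmci_set_clkreg(): with an 8-bit clkdiv field (max 255), the ST
	 * equation f = mclk / (clkdiv + 2) bottoms out at mclk / 257 and
	 * the ARM equation f = mclk / (2 * (clkdiv + 1)) at mclk / 512.
	 * The STM32 SDMMC divider is apparently wider, bottoming out at
	 * mclk / 2046.
	 */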
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
			     min(variant->f_max, mmc->f_max) :
			     min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
			     fmax : min(host->mclk, fmax);

	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
	if (IS_ERR(host->rst)) {
		ret = PTR_ERR(host->rst);
		goto clk_disable;
	}

	/* Get regulators and the supported OCR mask */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto clk_disable;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	/*
	 * Enable busy detection.
	 */
	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		/*
		 * Not all variants have a flag to enable busy detection
		 * in the DPSM, but if they do, set it here.
		 */
		if (variant->busy_dpsm_flag)
			mmci_write_datactrlreg(host,
					       host->variant->busy_dpsm_flag);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
	host->stop_abort.arg = 0;
	host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << variant->datactrl_blocksz;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
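
	/*
	 * For example: datalength_bits = 24 and datactrl_blocksz = 11
	 * (2048-byte blocks) give max_req_size = 2^24 - 1 bytes and
	 * max_blk_count = (2^24 - 1) >> 11 = 8191 blocks.
	 */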
	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);

	if (variant->mmcimask1)
		writel(0, host->base + MMCIMASK1);

	writel(0xfff, host->base + MMCICLEAR);

	/*
	 * If:
	 * - not using DT but using a descriptor table, or
	 * - using a table of descriptors ALONGSIDE DT,
	 * look up these descriptors named "cd" and "wp" right here and fail
	 * silently if they do not exist.
	 */
	if (!np) {
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			       DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	pm_runtime_put(&dev->dev);
	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		struct variant_data *variant = host->variant;

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);

		if (variant->mmcimask1)
			writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}
#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}
static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif
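
/*
 * pm_runtime_force_suspend()/pm_runtime_force_resume() route system
 * sleep through the runtime PM callbacks, so mmci_save()/mmci_restore()
 * above cover both the runtime and system suspend paths.
 */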
static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static const struct amba_id mmci_ids[] = {
	{
		.data = &variant_arm,
	},
	{
		.data = &variant_arm_extended_fifo,
	},
	{
		.data = &variant_arm_extended_fifo_hwfc,
	},
	{
		.data = &variant_arm,
	},
	/* ST Micro variants */
	{
		.data = &variant_u300,
	},
	{
		.data = &variant_nomadik,
	},
	{
		.data = &variant_nomadik,
	},
	{
		.data = &variant_ux500,
	},
	{
		.data = &variant_ux500v2,
	},
	{
		.data = &variant_stm32,
	},
	{
		.data = &variant_stm32_sdmmc,
	},
	/* Qualcomm variants */
	{
		.data = &variant_qcom,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
	.drv = {
		.name = DRIVER_NAME,
		.pm = &mmci_dev_pm_ops,
	},
	.probe = mmci_probe,
	.remove = mmci_remove,
	.id_table = mmci_ids,
};

module_amba_driver(mmci_driver);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");