1 // SPDX-License-Identifier: GPL-2.0-only
3 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
5 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
8 #include <linux/module.h>
9 #include <linux/of_device.h>
10 #include <linux/delay.h>
11 #include <linux/mmc/mmc.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_opp.h>
14 #include <linux/slab.h>
15 #include <linux/iopoll.h>
16 #include <linux/regulator/consumer.h>
18 #include "sdhci-pltfm.h"
21 #define CORE_MCI_VERSION 0x50
22 #define CORE_VERSION_MAJOR_SHIFT 28
23 #define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
24 #define CORE_VERSION_MINOR_MASK 0xff
26 #define CORE_MCI_GENERICS 0x70
27 #define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)
29 #define HC_MODE_EN 0x1
30 #define CORE_POWER 0x0
31 #define CORE_SW_RST BIT(7)
32 #define FF_CLK_SW_RST_DIS BIT(13)
34 #define CORE_PWRCTL_BUS_OFF BIT(0)
35 #define CORE_PWRCTL_BUS_ON BIT(1)
36 #define CORE_PWRCTL_IO_LOW BIT(2)
37 #define CORE_PWRCTL_IO_HIGH BIT(3)
38 #define CORE_PWRCTL_BUS_SUCCESS BIT(0)
39 #define CORE_PWRCTL_IO_SUCCESS BIT(2)
40 #define REQ_BUS_OFF BIT(0)
41 #define REQ_BUS_ON BIT(1)
42 #define REQ_IO_LOW BIT(2)
43 #define REQ_IO_HIGH BIT(3)
46 #define CORE_DLL_LOCK BIT(7)
47 #define CORE_DDR_DLL_LOCK BIT(11)
48 #define CORE_DLL_EN BIT(16)
49 #define CORE_CDR_EN BIT(17)
50 #define CORE_CK_OUT_EN BIT(18)
51 #define CORE_CDR_EXT_EN BIT(19)
52 #define CORE_DLL_PDN BIT(29)
53 #define CORE_DLL_RST BIT(30)
54 #define CORE_CMD_DAT_TRACK_SEL BIT(0)
56 #define CORE_DDR_CAL_EN BIT(0)
57 #define CORE_FLL_CYCLE_CNT BIT(18)
58 #define CORE_DLL_CLOCK_DISABLE BIT(21)
60 #define CORE_VENDOR_SPEC_POR_VAL 0xa9c
61 #define CORE_CLK_PWRSAVE BIT(1)
62 #define CORE_HC_MCLK_SEL_DFLT (2 << 8)
63 #define CORE_HC_MCLK_SEL_HS400 (3 << 8)
64 #define CORE_HC_MCLK_SEL_MASK (3 << 8)
65 #define CORE_IO_PAD_PWR_SWITCH_EN BIT(15)
66 #define CORE_IO_PAD_PWR_SWITCH BIT(16)
67 #define CORE_HC_SELECT_IN_EN BIT(18)
68 #define CORE_HC_SELECT_IN_HS400 (6 << 19)
69 #define CORE_HC_SELECT_IN_MASK (7 << 19)
71 #define CORE_3_0V_SUPPORT BIT(25)
72 #define CORE_1_8V_SUPPORT BIT(26)
73 #define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)
75 #define CORE_CSR_CDC_CTLR_CFG0 0x130
76 #define CORE_SW_TRIG_FULL_CALIB BIT(16)
77 #define CORE_HW_AUTOCAL_ENA BIT(17)
79 #define CORE_CSR_CDC_CTLR_CFG1 0x134
80 #define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
81 #define CORE_TIMER_ENA BIT(16)
83 #define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
84 #define CORE_CSR_CDC_REFCOUNT_CFG 0x140
85 #define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
86 #define CORE_CDC_OFFSET_CFG 0x14C
87 #define CORE_CSR_CDC_DELAY_CFG 0x150
88 #define CORE_CDC_SLAVE_DDA_CFG 0x160
89 #define CORE_CSR_CDC_STATUS0 0x164
90 #define CORE_CALIBRATION_DONE BIT(0)
92 #define CORE_CDC_ERROR_CODE_MASK 0x7000000
94 #define CORE_CSR_CDC_GEN_CFG 0x178
95 #define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
96 #define CORE_CDC_SWITCH_RC_EN BIT(1)
98 #define CORE_CDC_T4_DLY_SEL BIT(0)
99 #define CORE_CMDIN_RCLK_EN BIT(1)
100 #define CORE_START_CDC_TRAFFIC BIT(6)
102 #define CORE_PWRSAVE_DLL BIT(3)
104 #define DDR_CONFIG_POR_VAL 0x80040873
107 #define INVALID_TUNING_PHASE -1
108 #define SDHCI_MSM_MIN_CLOCK 400000
109 #define CORE_FREQ_100MHZ (100 * 1000 * 1000)
111 #define CDR_SELEXT_SHIFT 20
112 #define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
113 #define CMUX_SHIFT_PHASE_SHIFT 24
114 #define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
116 #define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
118 /* Timeout value to avoid infinite waiting for pwr_irq */
119 #define MSM_PWR_IRQ_TIMEOUT_MS 5000
121 #define msm_host_readl(msm_host, host, offset) \
122 msm_host->var_ops->msm_readl_relaxed(host, offset)
124 #define msm_host_writel(msm_host, val, host, offset) \
125 msm_host->var_ops->msm_writel_relaxed(val, host, offset)
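/*
 * Example (illustrative): msm_host_readl(msm_host, host,
 * msm_offset->core_pwrctl_status) dispatches to the variant readl
 * implementation below, i.e. readl_relaxed() on msm_host->core_mem for
 * pre-v5 (MCI) controllers or on host->ioaddr for sdhci-msm-v5 and later.
 */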
127 /* CQHCI vendor specific registers */
128 #define CQHCI_VENDOR_CFG1 0xA00
129 #define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
131 struct sdhci_msm_offset {
133 u32 core_mci_data_cnt;
135 u32 core_mci_fifo_cnt;
136 u32 core_mci_version;
138 u32 core_testbus_config;
139 u32 core_testbus_sel2_bit;
140 u32 core_testbus_ena;
141 u32 core_testbus_sel2;
142 u32 core_pwrctl_status;
143 u32 core_pwrctl_mask;
144 u32 core_pwrctl_clear;
146 u32 core_sdcc_debug_reg;
149 u32 core_vendor_spec;
150 u32 core_vendor_spec_adma_err_addr0;
151 u32 core_vendor_spec_adma_err_addr1;
152 u32 core_vendor_spec_func2;
153 u32 core_vendor_spec_capabilities0;
154 u32 core_ddr_200_cfg;
155 u32 core_vendor_spec3;
156 u32 core_dll_config_2;
157 u32 core_dll_config_3;
158 u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
162 static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
163 .core_mci_data_cnt = 0x35c,
164 .core_mci_status = 0x324,
165 .core_mci_fifo_cnt = 0x308,
166 .core_mci_version = 0x318,
167 .core_generics = 0x320,
168 .core_testbus_config = 0x32c,
169 .core_testbus_sel2_bit = 3,
170 .core_testbus_ena = (1 << 31),
171 .core_testbus_sel2 = (1 << 3),
172 .core_pwrctl_status = 0x240,
173 .core_pwrctl_mask = 0x244,
174 .core_pwrctl_clear = 0x248,
175 .core_pwrctl_ctl = 0x24c,
176 .core_sdcc_debug_reg = 0x358,
177 .core_dll_config = 0x200,
178 .core_dll_status = 0x208,
179 .core_vendor_spec = 0x20c,
180 .core_vendor_spec_adma_err_addr0 = 0x214,
181 .core_vendor_spec_adma_err_addr1 = 0x218,
182 .core_vendor_spec_func2 = 0x210,
183 .core_vendor_spec_capabilities0 = 0x21c,
184 .core_ddr_200_cfg = 0x224,
185 .core_vendor_spec3 = 0x250,
186 .core_dll_config_2 = 0x254,
187 .core_dll_config_3 = 0x258,
188 .core_ddr_config = 0x25c,
191 static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
192 .core_hc_mode = 0x78,
193 .core_mci_data_cnt = 0x30,
194 .core_mci_status = 0x34,
195 .core_mci_fifo_cnt = 0x44,
196 .core_mci_version = 0x050,
197 .core_generics = 0x70,
198 .core_testbus_config = 0x0cc,
199 .core_testbus_sel2_bit = 4,
200 .core_testbus_ena = (1 << 3),
201 .core_testbus_sel2 = (1 << 4),
202 .core_pwrctl_status = 0xdc,
203 .core_pwrctl_mask = 0xe0,
204 .core_pwrctl_clear = 0xe4,
205 .core_pwrctl_ctl = 0xe8,
206 .core_sdcc_debug_reg = 0x124,
207 .core_dll_config = 0x100,
208 .core_dll_status = 0x108,
209 .core_vendor_spec = 0x10c,
210 .core_vendor_spec_adma_err_addr0 = 0x114,
211 .core_vendor_spec_adma_err_addr1 = 0x118,
212 .core_vendor_spec_func2 = 0x110,
213 .core_vendor_spec_capabilities0 = 0x11c,
214 .core_ddr_200_cfg = 0x184,
215 .core_vendor_spec3 = 0x1b0,
216 .core_dll_config_2 = 0x1b4,
217 .core_ddr_config_old = 0x1b8,
218 .core_ddr_config = 0x1bc,
221 struct sdhci_msm_variant_ops {
222 u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
223 void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
228 * From V5, register spaces have changed. Wrap this info in a structure
229 * and choose the data structure based on the version info from DT.
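 * For example, the "qcom,sdhci-msm-v5" compatible selects sdhci_msm_v5_var
 * below, whose mci_removed flag makes the variant register accessors use
 * host->ioaddr instead of the legacy core_mem region.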
231 struct sdhci_msm_variant_info {
233 bool restore_dll_config;
234 const struct sdhci_msm_variant_ops *var_ops;
235 const struct sdhci_msm_offset *offset;
238 struct sdhci_msm_host {
239 struct platform_device *pdev;
240 void __iomem *core_mem; /* MSM SDCC mapped address */
241 int pwr_irq; /* power irq */
242 struct clk *bus_clk; /* SDHC bus voter clock */
243 struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
244 struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
245 unsigned long clk_rate;
246 struct mmc_host *mmc;
247 struct opp_table *opp_table;
249 bool use_14lpp_dll_reset;
251 bool calibration_done;
252 u8 saved_tuning_phase;
256 wait_queue_head_t pwr_irq_wait;
260 bool restore_dll_config;
261 const struct sdhci_msm_variant_ops *var_ops;
262 const struct sdhci_msm_offset *offset;
265 bool updated_ddr_cfg;
268 static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
270 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
271 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
273 return msm_host->offset;
277 * APIs to read/write to vendor specific registers which were there in the
278 * core_mem region before MCI was removed.
280 static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
283 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
284 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
286 return readl_relaxed(msm_host->core_mem + offset);
289 static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
292 return readl_relaxed(host->ioaddr + offset);
295 static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
296 struct sdhci_host *host, u32 offset)
298 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
299 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
301 writel_relaxed(val, msm_host->core_mem + offset);
304 static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
305 struct sdhci_host *host, u32 offset)
307 writel_relaxed(val, host->ioaddr + offset);
310 static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
313 struct mmc_ios ios = host->mmc->ios;
315 * The SDHC requires the internal clock frequency to be double the
316 * actual clock that will be set for DDR mode. The controller
317 * uses the faster clock (100/400MHz) for some of its parts and
318 * sends the actual required clock (50/200MHz) to the card.
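 * For example (illustrative), a 50 MHz DDR52 request results in the GCC
 * core clock being programmed to 100 MHz (clock * 2) while the card still
 * sees 50 MHz.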
320 if (ios.timing == MMC_TIMING_UHS_DDR50 ||
321 ios.timing == MMC_TIMING_MMC_DDR52 ||
322 ios.timing == MMC_TIMING_MMC_HS400 ||
323 host->flags & SDHCI_HS400_TUNING)
324 clock *= 2;
325 return clock;
328 static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
331 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
332 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
333 struct mmc_ios curr_ios = host->mmc->ios;
334 struct clk *core_clk = msm_host->bulk_clks[0].clk;
337 clock = msm_get_clock_rate_for_bus_mode(host, clock);
338 rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock);
340 pr_err("%s: Failed to set clock at rate %u at timing %d\n",
341 mmc_hostname(host->mmc), clock,
345 msm_host->clk_rate = clock;
346 pr_debug("%s: Setting clock at rate %lu at timing %d\n",
347 mmc_hostname(host->mmc), clk_get_rate(core_clk),
351 /* Platform specific tuning */
352 static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
356 struct mmc_host *mmc = host->mmc;
357 const struct sdhci_msm_offset *msm_offset =
358 sdhci_priv_msm_offset(host);
360 /* Poll for CK_OUT_EN bit. max. poll time = 50us */
361 ck_out_en = !!(readl_relaxed(host->ioaddr +
362 msm_offset->core_dll_config) & CORE_CK_OUT_EN);
364 while (ck_out_en != poll) {
365 if (--wait_cnt == 0) {
366 dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
367 mmc_hostname(mmc), poll);
372 ck_out_en = !!(readl_relaxed(host->ioaddr +
373 msm_offset->core_dll_config) & CORE_CK_OUT_EN);
379 static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
382 static const u8 grey_coded_phase_table[] = {
383 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
384 0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
388 struct mmc_host *mmc = host->mmc;
389 const struct sdhci_msm_offset *msm_offset =
390 sdhci_priv_msm_offset(host);
395 spin_lock_irqsave(&host->lock, flags);
397 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
398 config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
399 config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
400 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
402 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
403 rc = msm_dll_poll_ck_out_en(host, 0);
408 * Write the selected DLL clock output phase (0 ... 15)
409 * to CDR_SELEXT bit field of DLL_CONFIG register.
411 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
412 config &= ~CDR_SELEXT_MASK;
413 config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
414 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
416 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
417 config |= CORE_CK_OUT_EN;
418 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
420 /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
421 rc = msm_dll_poll_ck_out_en(host, 1);
425 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
426 config |= CORE_CDR_EN;
427 config &= ~CORE_CDR_EXT_EN;
428 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
432 dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
433 mmc_hostname(mmc), phase);
435 spin_unlock_irqrestore(&host->lock, flags);
440 * Find out the greatest range of consecutive selected
441 * DLL clock output phases that can be used as sampling
442 * setting for SD3.0 UHS-I card read operation (in SDR104
443 * timing mode) or for eMMC4.5 card read operation (in
444 * HS400/HS200 timing mode).
445 * Select the 3/4 of the range and configure the DLL with the
446 * selected DLL clock output phase.
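 * Illustrative example (values assumed, not from the original code): if
 * tuning passes for phases 6..11, that six-phase window is the largest one
 * and a phase roughly 3/4 of the way into it (phase 9 or 10) is selected.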
449 static int msm_find_most_appropriate_phase(struct sdhci_host *host,
450 u8 *phase_table, u8 total_phases)
453 u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
454 u8 phases_per_row[MAX_PHASES] = { 0 };
455 int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
456 int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
457 bool phase_0_found = false, phase_15_found = false;
458 struct mmc_host *mmc = host->mmc;
460 if (!total_phases || (total_phases > MAX_PHASES)) {
461 dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
462 mmc_hostname(mmc), total_phases);
466 for (cnt = 0; cnt < total_phases; cnt++) {
467 ranges[row_index][col_index] = phase_table[cnt];
468 phases_per_row[row_index] += 1;
471 if ((cnt + 1) == total_phases) {
473 /* check if next phase in phase_table is consecutive or not */
474 } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
480 if (row_index >= MAX_PHASES)
483 /* Check if phase-0 is present in the first valid window */
485 phase_0_found = true;
486 phase_0_raw_index = 0;
487 /* Check if a cycle exists between the 2 valid windows */
488 for (cnt = 1; cnt <= row_index; cnt++) {
489 if (phases_per_row[cnt]) {
490 for (i = 0; i < phases_per_row[cnt]; i++) {
491 if (ranges[cnt][i] == 15) {
492 phase_15_found = true;
493 phase_15_raw_index = cnt;
501 /* If 2 valid windows form cycle then merge them as single window */
502 if (phase_0_found && phase_15_found) {
503 /* number of phases in the row where phase 0 is present */
504 u8 phases_0 = phases_per_row[phase_0_raw_index];
505 /* number of phases in the row where phase 15 is present */
506 u8 phases_15 = phases_per_row[phase_15_raw_index];
508 if (phases_0 + phases_15 >= MAX_PHASES)
510 * If there is more than one phase window, the total
511 * number of phases in both windows must be
512 * less than MAX_PHASES.
516 /* Merge 2 cyclic windows */
518 for (cnt = 0; cnt < phases_0; cnt++) {
519 ranges[phase_15_raw_index][i] =
520 ranges[phase_0_raw_index][cnt];
521 if (++i >= MAX_PHASES)
525 phases_per_row[phase_0_raw_index] = 0;
526 phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
529 for (cnt = 0; cnt <= row_index; cnt++) {
530 if (phases_per_row[cnt] > curr_max) {
531 curr_max = phases_per_row[cnt];
532 selected_row_index = cnt;
536 i = (curr_max * 3) / 4;
540 ret = ranges[selected_row_index][i];
542 if (ret >= MAX_PHASES) {
544 dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
545 mmc_hostname(mmc), ret);
551 static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
553 u32 mclk_freq = 0, config;
554 const struct sdhci_msm_offset *msm_offset =
555 sdhci_priv_msm_offset(host);
557 /* Program the MCLK value to MCLK_FREQ bit field */
558 if (host->clock <= 112000000)
559 mclk_freq = 0;
560 else if (host->clock <= 125000000)
561 mclk_freq = 1;
562 else if (host->clock <= 137000000)
563 mclk_freq = 2;
564 else if (host->clock <= 150000000)
565 mclk_freq = 3;
566 else if (host->clock <= 162000000)
567 mclk_freq = 4;
568 else if (host->clock <= 175000000)
569 mclk_freq = 5;
570 else if (host->clock <= 187000000)
571 mclk_freq = 6;
572 else if (host->clock <= 200000000)
573 mclk_freq = 7;
575 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
576 config &= ~CMUX_SHIFT_PHASE_MASK;
577 config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
578 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
581 /* Initialize the DLL (Programmable Delay Line) */
582 static int msm_init_cm_dll(struct sdhci_host *host)
584 struct mmc_host *mmc = host->mmc;
585 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
586 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
588 unsigned long flags, xo_clk = 0;
590 const struct sdhci_msm_offset *msm_offset =
593 if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
594 xo_clk = clk_get_rate(msm_host->xo_clk);
596 spin_lock_irqsave(&host->lock, flags);
599 * Make sure that clock is always enabled when DLL
600 * tuning is in progress. Keeping PWRSAVE ON may
601 * turn off the clock.
603 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
604 config &= ~CORE_CLK_PWRSAVE;
605 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
607 if (msm_host->use_14lpp_dll_reset) {
608 config = readl_relaxed(host->ioaddr +
609 msm_offset->core_dll_config);
610 config &= ~CORE_CK_OUT_EN;
611 writel_relaxed(config, host->ioaddr +
612 msm_offset->core_dll_config);
614 config = readl_relaxed(host->ioaddr +
615 msm_offset->core_dll_config_2);
616 config |= CORE_DLL_CLOCK_DISABLE;
617 writel_relaxed(config, host->ioaddr +
618 msm_offset->core_dll_config_2);
621 config = readl_relaxed(host->ioaddr +
622 msm_offset->core_dll_config);
623 config |= CORE_DLL_RST;
624 writel_relaxed(config, host->ioaddr +
625 msm_offset->core_dll_config);
627 config = readl_relaxed(host->ioaddr +
628 msm_offset->core_dll_config);
629 config |= CORE_DLL_PDN;
630 writel_relaxed(config, host->ioaddr +
631 msm_offset->core_dll_config);
632 msm_cm_dll_set_freq(host);
634 if (msm_host->use_14lpp_dll_reset &&
635 !IS_ERR_OR_NULL(msm_host->xo_clk)) {
638 config = readl_relaxed(host->ioaddr +
639 msm_offset->core_dll_config_2);
640 config &= CORE_FLL_CYCLE_CNT;
642 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
645 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
648 config = readl_relaxed(host->ioaddr +
649 msm_offset->core_dll_config_2);
650 config &= ~(0xFF << 10);
651 config |= mclk_freq << 10;
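/*
 * Illustrative numbers (assumed, not from the original code): with
 * host->clock = 200000000, a typical 19.2 MHz TCXO as xo_clk and the
 * CORE_FLL_CYCLE_CNT (* 8) path, mclk_freq becomes
 * DIV_ROUND_CLOSEST_ULL(200000000 * 8, 19200000) = 83, which is then
 * programmed into bits [17:10] of DLL_CONFIG_2 here.
 */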
653 writel_relaxed(config, host->ioaddr +
654 msm_offset->core_dll_config_2);
655 /* wait for 5us before enabling DLL clock */
659 config = readl_relaxed(host->ioaddr +
660 msm_offset->core_dll_config);
661 config &= ~CORE_DLL_RST;
662 writel_relaxed(config, host->ioaddr +
663 msm_offset->core_dll_config);
665 config = readl_relaxed(host->ioaddr +
666 msm_offset->core_dll_config);
667 config &= ~CORE_DLL_PDN;
668 writel_relaxed(config, host->ioaddr +
669 msm_offset->core_dll_config);
671 if (msm_host->use_14lpp_dll_reset) {
672 msm_cm_dll_set_freq(host);
673 config = readl_relaxed(host->ioaddr +
674 msm_offset->core_dll_config_2);
675 config &= ~CORE_DLL_CLOCK_DISABLE;
676 writel_relaxed(config, host->ioaddr +
677 msm_offset->core_dll_config_2);
680 config = readl_relaxed(host->ioaddr +
681 msm_offset->core_dll_config);
682 config |= CORE_DLL_EN;
683 writel_relaxed(config, host->ioaddr +
684 msm_offset->core_dll_config);
686 config = readl_relaxed(host->ioaddr +
687 msm_offset->core_dll_config);
688 config |= CORE_CK_OUT_EN;
689 writel_relaxed(config, host->ioaddr +
690 msm_offset->core_dll_config);
692 /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
693 while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
695 /* max. wait of 50us for LOCK bit to be set */
696 if (--wait_cnt == 0) {
697 dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
699 spin_unlock_irqrestore(&host->lock, flags);
705 spin_unlock_irqrestore(&host->lock, flags);
709 static void msm_hc_select_default(struct sdhci_host *host)
711 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
712 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
714 const struct sdhci_msm_offset *msm_offset =
717 if (!msm_host->use_cdclp533) {
718 config = readl_relaxed(host->ioaddr +
719 msm_offset->core_vendor_spec3);
720 config &= ~CORE_PWRSAVE_DLL;
721 writel_relaxed(config, host->ioaddr +
722 msm_offset->core_vendor_spec3);
725 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
726 config &= ~CORE_HC_MCLK_SEL_MASK;
727 config |= CORE_HC_MCLK_SEL_DFLT;
728 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
731 * Disable HC_SELECT_IN to be able to use the UHS mode select
732 * configuration from Host Control2 register for all other modes.
734 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
735 * in VENDOR_SPEC_FUNC
737 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
738 config &= ~CORE_HC_SELECT_IN_EN;
739 config &= ~CORE_HC_SELECT_IN_MASK;
740 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
743 * Make sure above writes impacting free running MCLK are completed
744 * before changing the clk_rate at GCC.
749 static void msm_hc_select_hs400(struct sdhci_host *host)
751 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
752 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
753 struct mmc_ios ios = host->mmc->ios;
754 u32 config, dll_lock;
756 const struct sdhci_msm_offset *msm_offset =
759 /* Select the divided clock (free running MCLK/2) */
760 config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
761 config &= ~CORE_HC_MCLK_SEL_MASK;
762 config |= CORE_HC_MCLK_SEL_HS400;
764 writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
766 * Select HS400 mode using the HC_SELECT_IN from the VENDOR SPEC register.
769 if ((msm_host->tuning_done || ios.enhanced_strobe) &&
770 !msm_host->calibration_done) {
771 config = readl_relaxed(host->ioaddr +
772 msm_offset->core_vendor_spec);
773 config |= CORE_HC_SELECT_IN_HS400;
774 config |= CORE_HC_SELECT_IN_EN;
775 writel_relaxed(config, host->ioaddr +
776 msm_offset->core_vendor_spec);
778 if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
780 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
781 * core_dll_status to be set. This should get set
782 * within 15 us at 200 MHz.
784 rc = readl_relaxed_poll_timeout(host->ioaddr +
785 msm_offset->core_dll_status,
789 CORE_DDR_DLL_LOCK)), 10,
791 if (rc == -ETIMEDOUT)
792 pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
793 mmc_hostname(host->mmc), dll_lock);
796 * Make sure above writes impacting free running MCLK are completed
797 * before changing the clk_rate at GCC.
803 * sdhci_msm_hc_select_mode :- In general all timing modes are
804 * controlled via UHS mode select in Host Control2 register.
805 * eMMC specific HS200/HS400 don't have their respective modes
806 * defined here, hence we use these values.
808 * HS200 - SDR104 (Since they both are equivalent in functionality)
809 * HS400 - This involves multiple configurations
810 * Initially SDR104 - when tuning is required as HS200
811 * Then when switching to DDR @ 400MHz (HS400) we use
812 * the vendor specific HC_SELECT_IN to control the mode.
814 * In addition to controlling the modes we also need to select the
815 * correct input clock for DLL depending on the mode.
817 * HS400 - divided clock (free running MCLK/2)
818 * All other modes - default (free running MCLK)
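 * For example (illustrative), HS200 keeps the free running MCLK and is
 * reported as SDR104 in Host Control2, while HS400 data transfers (once
 * tuning is done or enhanced strobe is used) program HC_SELECT_IN to 6
 * (HS400) with HC_SELECT_IN_EN set and run the controller from MCLK/2.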
820 static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
822 struct mmc_ios ios = host->mmc->ios;
824 if (ios.timing == MMC_TIMING_MMC_HS400 ||
825 host->flags & SDHCI_HS400_TUNING)
826 msm_hc_select_hs400(host);
828 msm_hc_select_default(host);
831 static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
833 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
834 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
835 u32 config, calib_done;
837 const struct sdhci_msm_offset *msm_offset =
840 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
843 * Retuning in HS400 (DDR mode) will fail, just reset the
844 * tuning block and restore the saved tuning phase.
846 ret = msm_init_cm_dll(host);
850 /* Set the selected phase in delay line hw block */
851 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
855 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
856 config |= CORE_CMD_DAT_TRACK_SEL;
857 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
859 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
860 config &= ~CORE_CDC_T4_DLY_SEL;
861 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
863 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
864 config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
865 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
867 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
868 config |= CORE_CDC_SWITCH_RC_EN;
869 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
871 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
872 config &= ~CORE_START_CDC_TRAFFIC;
873 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
875 /* Perform CDC Register Initialization Sequence */
877 writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
878 writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
879 writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
880 writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
881 writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
882 writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
883 writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
884 writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
885 writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
887 /* CDC HW Calibration */
889 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
890 config |= CORE_SW_TRIG_FULL_CALIB;
891 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
893 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
894 config &= ~CORE_SW_TRIG_FULL_CALIB;
895 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
897 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
898 config |= CORE_HW_AUTOCAL_ENA;
899 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
901 config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
902 config |= CORE_TIMER_ENA;
903 writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
905 ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
907 (calib_done & CORE_CALIBRATION_DONE),
910 if (ret == -ETIMEDOUT) {
911 pr_err("%s: %s: CDC calibration was not completed\n",
912 mmc_hostname(host->mmc), __func__);
916 ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
917 & CORE_CDC_ERROR_CODE_MASK;
919 pr_err("%s: %s: CDC error code %d\n",
920 mmc_hostname(host->mmc), __func__, ret);
925 config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
926 config |= CORE_START_CDC_TRAFFIC;
927 writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
929 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
934 static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
936 struct mmc_host *mmc = host->mmc;
937 u32 dll_status, config, ddr_cfg_offset;
939 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
940 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
941 const struct sdhci_msm_offset *msm_offset =
942 sdhci_priv_msm_offset(host);
944 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
947 * Currently the core_ddr_config register defaults to the desired
948 * configuration on reset. We still reprogram the power on
949 * reset (POR) value in case it was modified by
950 * bootloaders. In the future, if this changes, then the desired
951 * values will need to be programmed appropriately.
953 if (msm_host->updated_ddr_cfg)
954 ddr_cfg_offset = msm_offset->core_ddr_config;
956 ddr_cfg_offset = msm_offset->core_ddr_config_old;
957 writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);
959 if (mmc->ios.enhanced_strobe) {
960 config = readl_relaxed(host->ioaddr +
961 msm_offset->core_ddr_200_cfg);
962 config |= CORE_CMDIN_RCLK_EN;
963 writel_relaxed(config, host->ioaddr +
964 msm_offset->core_ddr_200_cfg);
967 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
968 config |= CORE_DDR_CAL_EN;
969 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);
971 ret = readl_relaxed_poll_timeout(host->ioaddr +
972 msm_offset->core_dll_status,
974 (dll_status & CORE_DDR_DLL_LOCK),
977 if (ret == -ETIMEDOUT) {
978 pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
979 mmc_hostname(host->mmc), __func__);
984 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
985 * When MCLK is gated OFF, it is not gated for less than 0.5us
986 * and MCLK must be switched on for at least 1us before DATA
987 * starts coming. Controllers with 14lpp and later tech DLL cannot
988 * guarantee the above requirement. So PWRSAVE_DLL should not be
989 * turned on for host controllers using this DLL.
991 if (!msm_host->use_14lpp_dll_reset) {
992 config = readl_relaxed(host->ioaddr +
993 msm_offset->core_vendor_spec3);
994 config |= CORE_PWRSAVE_DLL;
995 writel_relaxed(config, host->ioaddr +
996 msm_offset->core_vendor_spec3);
1000 * Drain the writebuffer to ensure the above DLL calibration
1001 * and PWRSAVE DLL settings are applied.
1005 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
1010 static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
1012 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1013 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1014 struct mmc_host *mmc = host->mmc;
1017 const struct sdhci_msm_offset *msm_offset =
1020 pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
1023 * Retuning in HS400 (DDR mode) will fail, just reset the
1024 * tuning block and restore the saved tuning phase.
1026 ret = msm_init_cm_dll(host);
1030 if (!mmc->ios.enhanced_strobe) {
1031 /* Set the selected phase in delay line hw block */
1032 ret = msm_config_cm_dll_phase(host,
1033 msm_host->saved_tuning_phase);
1036 config = readl_relaxed(host->ioaddr +
1037 msm_offset->core_dll_config);
1038 config |= CORE_CMD_DAT_TRACK_SEL;
1039 writel_relaxed(config, host->ioaddr +
1040 msm_offset->core_dll_config);
1043 if (msm_host->use_cdclp533)
1044 ret = sdhci_msm_cdclp533_calibration(host);
1046 ret = sdhci_msm_cm_dll_sdc4_calibration(host);
1048 pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
1053 static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
1055 struct mmc_ios *ios = &host->mmc->ios;
1058 * Tuning is required for SDR104, HS200 and HS400 cards
1059 * if the clock frequency is greater than 100MHz in these modes.
1061 if (host->clock <= CORE_FREQ_100MHZ ||
1062 !(ios->timing == MMC_TIMING_MMC_HS400 ||
1063 ios->timing == MMC_TIMING_MMC_HS200 ||
1064 ios->timing == MMC_TIMING_UHS_SDR104) ||
1065 ios->enhanced_strobe)
1071 static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
1073 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1074 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1078 * SDR DLL comes into picture only for timing modes which need tuning.
1081 if (!sdhci_msm_is_tuning_needed(host))
1084 /* Reset the tuning block */
1085 ret = msm_init_cm_dll(host);
1089 /* Restore the tuning block */
1090 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
1095 static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
1097 const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
1098 u32 config, oldconfig = readl_relaxed(host->ioaddr +
1099 msm_offset->core_dll_config);
1103 config |= CORE_CDR_EN;
1104 config &= ~CORE_CDR_EXT_EN;
1106 config &= ~CORE_CDR_EN;
1107 config |= CORE_CDR_EXT_EN;
1110 if (config != oldconfig) {
1111 writel_relaxed(config, host->ioaddr +
1112 msm_offset->core_dll_config);
1116 static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
1118 struct sdhci_host *host = mmc_priv(mmc);
1119 int tuning_seq_cnt = 3;
1120 u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
1122 struct mmc_ios ios = host->mmc->ios;
1123 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1124 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1126 if (!sdhci_msm_is_tuning_needed(host)) {
1127 msm_host->use_cdr = false;
1128 sdhci_msm_set_cdr(host, false);
1132 /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
1133 msm_host->use_cdr = true;
1136 * HS400 tuning in HS200 timing requires:
1137 * - select MCLK/2 in VENDOR_SPEC
1138 * - program MCLK to 400MHz (or nearest supported) in GCC
1140 if (host->flags & SDHCI_HS400_TUNING) {
1141 sdhci_msm_hc_select_mode(host);
1142 msm_set_clock_rate_for_bus_mode(host, ios.clock);
1143 host->flags &= ~SDHCI_HS400_TUNING;
1147 /* First of all reset the tuning block */
1148 rc = msm_init_cm_dll(host);
1154 /* Set the phase in delay line hw block */
1155 rc = msm_config_cm_dll_phase(host, phase);
1159 rc = mmc_send_tuning(mmc, opcode, NULL);
1161 /* Tuning is successful at this tuning point */
1162 tuned_phases[tuned_phase_cnt++] = phase;
1163 dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
1164 mmc_hostname(mmc), phase);
1166 } while (++phase < ARRAY_SIZE(tuned_phases));
1168 if (tuned_phase_cnt) {
1169 rc = msm_find_most_appropriate_phase(host, tuned_phases,
1177 * Finally set the selected phase in the delay line hw block.
1180 rc = msm_config_cm_dll_phase(host, phase);
1183 msm_host->saved_tuning_phase = phase;
1184 dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
1185 mmc_hostname(mmc), phase);
1187 if (--tuning_seq_cnt)
1190 dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
1196 msm_host->tuning_done = true;
1201 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
1202 * This needs to be done for both tuning and enhanced_strobe mode.
1203 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz,
1204 * a fixed feedback clock is used.
1206 static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
1208 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1209 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1212 if (host->clock > CORE_FREQ_100MHZ &&
1213 (msm_host->tuning_done || ios->enhanced_strobe) &&
1214 !msm_host->calibration_done) {
1215 ret = sdhci_msm_hs400_dll_calibration(host);
1217 msm_host->calibration_done = true;
1219 pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
1220 mmc_hostname(host->mmc), ret);
1224 static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
1227 struct mmc_host *mmc = host->mmc;
1228 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1229 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1232 const struct sdhci_msm_offset *msm_offset =
1235 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1236 /* Select Bus Speed Mode for host */
1237 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1239 case MMC_TIMING_UHS_SDR12:
1240 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1242 case MMC_TIMING_UHS_SDR25:
1243 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1245 case MMC_TIMING_UHS_SDR50:
1246 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1248 case MMC_TIMING_MMC_HS400:
1249 case MMC_TIMING_MMC_HS200:
1250 case MMC_TIMING_UHS_SDR104:
1251 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1253 case MMC_TIMING_UHS_DDR50:
1254 case MMC_TIMING_MMC_DDR52:
1255 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1260 * When clock frequency is less than 100MHz, the feedback clock must be
1261 * provided and DLL must not be used so that tuning can be skipped. To
1262 * provide feedback clock, the mode selection can be any value less
1263 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
1265 if (host->clock <= CORE_FREQ_100MHZ) {
1266 if (uhs == MMC_TIMING_MMC_HS400 ||
1267 uhs == MMC_TIMING_MMC_HS200 ||
1268 uhs == MMC_TIMING_UHS_SDR104)
1269 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1271 * DLL is not required for clock <= 100MHz
1272 * Thus, make sure it is disabled when not required.
1274 config = readl_relaxed(host->ioaddr +
1275 msm_offset->core_dll_config);
1276 config |= CORE_DLL_RST;
1277 writel_relaxed(config, host->ioaddr +
1278 msm_offset->core_dll_config);
1280 config = readl_relaxed(host->ioaddr +
1281 msm_offset->core_dll_config);
1282 config |= CORE_DLL_PDN;
1283 writel_relaxed(config, host->ioaddr +
1284 msm_offset->core_dll_config);
1287 * The DLL needs to be restored and CDCLP533 recalibrated
1288 * when the clock frequency is set back to 400MHz.
1290 msm_host->calibration_done = false;
1293 dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
1294 mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
1295 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1297 if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
1298 sdhci_msm_hs400(host, &mmc->ios);
1301 static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
1303 init_waitqueue_head(&msm_host->pwr_irq_wait);
1306 static inline void sdhci_msm_complete_pwr_irq_wait(
1307 struct sdhci_msm_host *msm_host)
1309 wake_up(&msm_host->pwr_irq_wait);
1313 * sdhci_msm_check_power_status API should be called when register writes
1314 * which can toggle the sdhci IO bus ON/OFF or change IO lines HIGH/LOW happen.
1315 * The state to which the register writes will change the IO lines should be
1316 * passed as the argument req_type. This API will check whether the IO line's
1317 * state is already the expected state and will wait for the power irq only if
1318 * the power irq is expected to be triggered based on the current IO line state
1319 * and the expected IO line state.
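 * For example, writing SDHCI_POWER_ON to SDHCI_POWER_CONTROL via
 * sdhci_msm_writeb() maps to REQ_BUS_ON, and this function then waits up
 * to MSM_PWR_IRQ_TIMEOUT_MS for the power irq handler to acknowledge the
 * BUS_ON request.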
1321 static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
1323 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1324 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1326 u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
1327 const struct sdhci_msm_offset *msm_offset =
1330 pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
1331 mmc_hostname(host->mmc), __func__, req_type,
1332 msm_host->curr_pwr_state, msm_host->curr_io_level);
1335 * The power interrupt will not be generated for signal voltage
1336 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
1337 * Since sdhci-msm-v5, this bit has been removed and SW must consider it as always set.
1340 if (!msm_host->mci_removed)
1341 val = msm_host_readl(msm_host, host,
1342 msm_offset->core_generics);
1343 if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
1344 !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
1349 * The IRQ for request type IO High/LOW will be generated when -
1350 * there is a state change in 1.8V enable bit (bit 3) of
1351 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
1352 * which indicates 3.3V IO voltage. So, when MMC core layer tries
1353 * to set it to 3.3V before card detection happens, the
1354 * IRQ doesn't get triggered as there is no state change in this bit.
1355 * The driver already handles this case by changing the IO voltage
1356 * level to high as part of controller power up sequence. Hence, check
1357 * for host->pwr to handle a case where IO voltage high request is
1358 * issued even before controller power up.
1360 if ((req_type & REQ_IO_HIGH) && !host->pwr) {
1361 pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
1362 mmc_hostname(host->mmc), req_type);
1365 if ((req_type & msm_host->curr_pwr_state) ||
1366 (req_type & msm_host->curr_io_level))
1369 * This is needed here to handle cases where register writes will
1370 * not change the current bus state or io level of the controller.
1371 * In this case, no power irq will be triggered and we should not wait.
1375 if (!wait_event_timeout(msm_host->pwr_irq_wait,
1376 msm_host->pwr_irq_flag,
1377 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
1378 dev_warn(&msm_host->pdev->dev,
1379 "%s: pwr_irq for req: (%d) timed out\n",
1380 mmc_hostname(host->mmc), req_type);
1382 pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
1383 __func__, req_type);
1386 static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
1388 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1389 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1390 const struct sdhci_msm_offset *msm_offset =
1393 pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
1394 mmc_hostname(host->mmc),
1395 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
1396 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
1397 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
1400 static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
1402 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1403 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1404 u32 irq_status, irq_ack = 0;
1406 u32 pwr_state = 0, io_level = 0;
1408 const struct sdhci_msm_offset *msm_offset = msm_host->offset;
1410 irq_status = msm_host_readl(msm_host, host,
1411 msm_offset->core_pwrctl_status);
1412 irq_status &= INT_MASK;
1414 msm_host_writel(msm_host, irq_status, host,
1415 msm_offset->core_pwrctl_clear);
1418 * There is a rare HW scenario where the first clear pulse could be
1419 * lost when actual reset and clear/read of status register is
1420 * happening at the same time. Hence, retry at least 10 times to make
1421 * sure status register is cleared. Otherwise, this will result in
1422 * a spurious power IRQ resulting in system instability.
1424 while (irq_status & msm_host_readl(msm_host, host,
1425 msm_offset->core_pwrctl_status)) {
1427 pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
1428 mmc_hostname(host->mmc), irq_status);
1429 sdhci_msm_dump_pwr_ctrl_regs(host);
1433 msm_host_writel(msm_host, irq_status, host,
1434 msm_offset->core_pwrctl_clear);
1439 /* Handle BUS ON/OFF */
1440 if (irq_status & CORE_PWRCTL_BUS_ON) {
1441 pwr_state = REQ_BUS_ON;
1442 io_level = REQ_IO_HIGH;
1443 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
1445 if (irq_status & CORE_PWRCTL_BUS_OFF) {
1446 pwr_state = REQ_BUS_OFF;
1447 io_level = REQ_IO_LOW;
1448 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
1450 /* Handle IO LOW/HIGH */
1451 if (irq_status & CORE_PWRCTL_IO_LOW) {
1452 io_level = REQ_IO_LOW;
1453 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
1455 if (irq_status & CORE_PWRCTL_IO_HIGH) {
1456 io_level = REQ_IO_HIGH;
1457 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
1461 * The driver has to acknowledge the interrupt, switch voltages and
1462 * report back to this register whether it succeeded or not. The voltage
1463 * switches are handled by the sdhci core, so just report success.
1465 msm_host_writel(msm_host, irq_ack, host,
1466 msm_offset->core_pwrctl_ctl);
1469 * If we don't have info regarding the voltage levels supported by
1470 * regulators, don't change the IO PAD PWR SWITCH.
1472 if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
1475 * We should unset IO PAD PWR switch only if the register write
1476 * can set IO lines high and the regulator also switches to 3 V.
1477 * Else, we should keep the IO PAD PWR switch set.
1478 * This is applicable to certain targets where eMMC vccq supply
1479 * is only 1.8V. In such targets, even during REQ_IO_HIGH, the
1480 * IO PAD PWR switch must be kept set to reflect actual
1481 * regulator voltage. This way, during initialization of
1482 * controllers with only 1.8V, we will set the IO PAD bit
1483 * without waiting for a REQ_IO_LOW.
1485 config = readl_relaxed(host->ioaddr +
1486 msm_offset->core_vendor_spec);
1487 new_config = config;
1489 if ((io_level & REQ_IO_HIGH) &&
1490 (msm_host->caps_0 & CORE_3_0V_SUPPORT))
1491 new_config &= ~CORE_IO_PAD_PWR_SWITCH;
1492 else if ((io_level & REQ_IO_LOW) ||
1493 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
1494 new_config |= CORE_IO_PAD_PWR_SWITCH;
1496 if (config ^ new_config)
1497 writel_relaxed(new_config, host->ioaddr +
1498 msm_offset->core_vendor_spec);
1502 msm_host->curr_pwr_state = pwr_state;
1504 msm_host->curr_io_level = io_level;
1506 pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
1507 mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
1511 static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
1513 struct sdhci_host *host = (struct sdhci_host *)data;
1514 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1515 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1517 sdhci_msm_handle_pwr_irq(host, irq);
1518 msm_host->pwr_irq_flag = 1;
1519 sdhci_msm_complete_pwr_irq_wait(msm_host);
1525 static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
1527 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1528 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1529 struct clk *core_clk = msm_host->bulk_clks[0].clk;
1531 return clk_round_rate(core_clk, ULONG_MAX);
1534 static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
1536 return SDHCI_MSM_MIN_CLOCK;
1540 * __sdhci_msm_set_clock - sdhci_msm clock control.
1543 * MSM controller does not use internal divider and
1544 * instead directly controls the GCC clock as per
1545 * HW recommendation.
1547 static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1551 * Keep actual_clock as zero -
1552 * - since no divider is used there is no need for actual_clock.
1553 * - MSM controller uses SDCLK for data timeout calculation. If
1554 * actual_clock is zero, host->clock is taken for calculation.
1556 host->mmc->actual_clock = 0;
1558 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1564 * The MSM controller does not use a clock divider.
1565 * Thus read SDHCI_CLOCK_CONTROL and only enable the
1566 * clock with no divider value programmed.
1568 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1569 sdhci_enable_clk(host, clk);
1572 /* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
1573 static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1575 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1576 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1579 msm_host->clk_rate = clock;
1583 sdhci_msm_hc_select_mode(host);
1585 msm_set_clock_rate_for_bus_mode(host, clock);
1587 __sdhci_msm_set_clock(host, clock);
1590 /*****************************************************************************\
1592 * MSM Command Queue Engine (CQE) *
1594 \*****************************************************************************/
1596 static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
1601 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1604 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1608 static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
1610 struct sdhci_host *host = mmc_priv(mmc);
1611 unsigned long flags;
1615 * When CQE is halted, the legacy SDHCI path operates only
1616 * on 16-byte descriptors in 64bit mode.
1618 if (host->flags & SDHCI_USE_64_BIT_DMA)
1619 host->desc_sz = 16;
1621 spin_lock_irqsave(&host->lock, flags);
1624 * During CQE command transfers, command complete bit gets latched.
1625 * So s/w should clear command complete interrupt status when CQE is
1626 * either halted or disabled. Otherwise an unexpected SDHCI legacy
1627 * interrupt gets triggered when CQE is halted/disabled.
1629 ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
1630 ctrl |= SDHCI_INT_RESPONSE;
1631 sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
1632 sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
1634 spin_unlock_irqrestore(&host->lock, flags);
1636 sdhci_cqe_disable(mmc, recovery);
1639 static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
1640 .enable = sdhci_cqe_enable,
1641 .disable = sdhci_msm_cqe_disable,
1644 static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
1645 struct platform_device *pdev)
1647 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1648 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1649 struct cqhci_host *cq_host;
1655 * When CQE is halted, SDHC operates only on 16-byte ADMA descriptors.
1656 * So ensure the ADMA table is allocated for 16-byte descriptors.
1658 if (host->caps & SDHCI_CAN_64BIT)
1659 host->alloc_desc_sz = 16;
1661 ret = sdhci_setup_host(host);
1665 cq_host = cqhci_pltfm_init(pdev);
1666 if (IS_ERR(cq_host)) {
1667 ret = PTR_ERR(cq_host);
1668 dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
1672 msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1673 cq_host->ops = &sdhci_msm_cqhci_ops;
1675 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1677 ret = cqhci_init(cq_host, host->mmc, dma64);
1679 dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
1680 mmc_hostname(host->mmc), ret);
1684 /* Disable cqe reset due to cqe enable signal */
1685 cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
1686 cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
1687 cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
1690 * SDHC expects 12-byte ADMA descriptors until CQE is enabled.
1691 * So limit desc_sz to 12 so that the data commands that are sent
1692 * during card initialization (before CQE gets enabled) would
1693 * get executed without any issues.
1695 if (host->flags & SDHCI_USE_64_BIT_DMA)
1696 host->desc_sz = 12;
1698 ret = __sdhci_add_host(host);
1702 dev_info(&pdev->dev, "%s: CQE init: success\n",
1703 mmc_hostname(host->mmc));
1707 sdhci_cleanup_host(host);
1712 * Platform specific register write functions. This is so that, if any
1713 * register write needs to be followed up by platform specific actions,
1714 * they can be added here. These functions can go to sleep when writes
1715 * to certain registers are done.
1716 * These functions rely on sdhci_set_ios not using a spinlock.
1718 static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
1720 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1721 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1725 case SDHCI_HOST_CONTROL2:
1726 req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
1729 case SDHCI_SOFTWARE_RESET:
1730 if (host->pwr && (val & SDHCI_RESET_ALL))
1731 req_type = REQ_BUS_OFF;
1733 case SDHCI_POWER_CONTROL:
1734 req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
1736 case SDHCI_TRANSFER_MODE:
1737 msm_host->transfer_mode = val;
1740 if (!msm_host->use_cdr)
1742 if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
1743 SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
1744 SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
1745 sdhci_msm_set_cdr(host, true);
1747 sdhci_msm_set_cdr(host, false);
1752 msm_host->pwr_irq_flag = 0;
1754 * Since this register write may trigger a power irq, ensure
1755 * all previous register writes are complete by this point.
1762 /* This function may sleep */
1763 static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
1767 req_type = __sdhci_msm_check_write(host, val, reg);
1768 writew_relaxed(val, host->ioaddr + reg);
1771 sdhci_msm_check_power_status(host, req_type);
1774 /* This function may sleep */
1775 static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
1779 req_type = __sdhci_msm_check_write(host, val, reg);
1781 writeb_relaxed(val, host->ioaddr + reg);
1784 sdhci_msm_check_power_status(host, req_type);
1787 static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
1789 struct mmc_host *mmc = msm_host->mmc;
1790 struct regulator *supply = mmc->supply.vqmmc;
1791 u32 caps = 0, config;
1792 struct sdhci_host *host = mmc_priv(mmc);
1793 const struct sdhci_msm_offset *msm_offset = msm_host->offset;
1795 if (!IS_ERR(mmc->supply.vqmmc)) {
1796 if (regulator_is_supported_voltage(supply, 1700000, 1950000))
1797 caps |= CORE_1_8V_SUPPORT;
1798 if (regulator_is_supported_voltage(supply, 2700000, 3600000))
1799 caps |= CORE_3_0V_SUPPORT;
1802 pr_warn("%s: 1.8/3V not supported for vqmmc\n",
1808 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
1809 * bit can be used as required later on.
1811 u32 io_level = msm_host->curr_io_level;
1813 config = readl_relaxed(host->ioaddr +
1814 msm_offset->core_vendor_spec);
1815 config |= CORE_IO_PAD_PWR_SWITCH_EN;
1817 if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
1818 config &= ~CORE_IO_PAD_PWR_SWITCH;
1819 else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
1820 config |= CORE_IO_PAD_PWR_SWITCH;
1822 writel_relaxed(config,
1823 host->ioaddr + msm_offset->core_vendor_spec);
1825 msm_host->caps_0 |= caps;
1826 pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
1829 static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
1831 if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
1832 cqhci_deactivate(host->mmc);
1833 sdhci_reset(host, mask);
1836 static const struct sdhci_msm_variant_ops mci_var_ops = {
1837 .msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
1838 .msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
1841 static const struct sdhci_msm_variant_ops v5_var_ops = {
1842 .msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
1843 .msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
1846 static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
1847 .var_ops = &mci_var_ops,
1848 .offset = &sdhci_msm_mci_offset,
1851 static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
1852 .mci_removed = true,
1853 .var_ops = &v5_var_ops,
1854 .offset = &sdhci_msm_v5_offset,
1857 static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
1858 .mci_removed = true,
1859 .restore_dll_config = true,
1860 .var_ops = &v5_var_ops,
1861 .offset = &sdhci_msm_v5_offset,
1864 static const struct of_device_id sdhci_msm_dt_match[] = {
1865 {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
1866 {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
1867 {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
1871 MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
1873 static const struct sdhci_ops sdhci_msm_ops = {
1874 .reset = sdhci_msm_reset,
1875 .set_clock = sdhci_msm_set_clock,
1876 .get_min_clock = sdhci_msm_get_min_clock,
1877 .get_max_clock = sdhci_msm_get_max_clock,
1878 .set_bus_width = sdhci_set_bus_width,
1879 .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
1880 .write_w = sdhci_msm_writew,
1881 .write_b = sdhci_msm_writeb,
1882 .irq = sdhci_msm_cqe_irq,
1885 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
1886 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1887 SDHCI_QUIRK_SINGLE_POWER_WRITE |
1888 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
1889 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1891 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1892 .ops = &sdhci_msm_ops,
1895 static int sdhci_msm_probe(struct platform_device *pdev)
1897 struct sdhci_host *host;
1898 struct sdhci_pltfm_host *pltfm_host;
1899 struct sdhci_msm_host *msm_host;
1902 u16 host_version, core_minor;
1903 u32 core_version, config;
1905 const struct sdhci_msm_offset *msm_offset;
1906 const struct sdhci_msm_variant_info *var_info;
1907 struct device_node *node = pdev->dev.of_node;
1909 host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
1911 return PTR_ERR(host);
1913 host->sdma_boundary = 0;
1914 pltfm_host = sdhci_priv(host);
1915 msm_host = sdhci_pltfm_priv(pltfm_host);
1916 msm_host->mmc = host->mmc;
1917 msm_host->pdev = pdev;
1919 ret = mmc_of_parse(host->mmc);
1924 * Based on the compatible string, load the required msm host info from
1925 * the data associated with the version info.
1927 var_info = of_device_get_match_data(&pdev->dev);
1929 msm_host->mci_removed = var_info->mci_removed;
1930 msm_host->restore_dll_config = var_info->restore_dll_config;
1931 msm_host->var_ops = var_info->var_ops;
1932 msm_host->offset = var_info->offset;
1934 msm_offset = msm_host->offset;
1936 sdhci_get_of_property(pdev);
1938 msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
	if (IS_ERR(msm_host->opp_table)) {
		ret = PTR_ERR(msm_host->opp_table);
		goto bus_clk_disable;
	}

	/* OPP table is optional */
	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (!ret) {
		msm_host->has_opp_table = true;
	} else if (ret != -ENODEV) {
		dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
		goto opp_cleanup;
	}

	/* Vote for maximum clock rate for maximum performance */
	ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto opp_cleanup;
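	/*
	 * Informational note: the bulk_clks[] slots are populated by
	 * position - "core" at index 0, "iface" at index 1 - while the
	 * optional "cal" and "sleep" clocks land in indices 2 and 3 and are
	 * left NULL when absent, which the clk bulk API treats as no-ops.
	 */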

	/*
	 * xo clock is needed for FLL feature of cm_dll.
	 * If the xo clock is not mentioned in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	if (!msm_host->mci_removed) {
		msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}

	/* Reset the vendor spec register to power on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
		       host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Set HC_MODE_EN bit in HC_MODE register */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
					msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}
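	/*
	 * Informational note: on the older controllers that still expose the
	 * legacy MCI register space, HC_MODE_EN switches the core into the
	 * standard SDHC (host controller) register mode, and
	 * FF_CLK_SW_RST_DIS is believed to keep the free-running clock from
	 * being reset by a software reset; the "mci_removed" variants come
	 * up in host controller mode already, so the block above is skipped.
	 */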

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = msm_host_readl(msm_host, host,
				      msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;
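	/*
	 * Informational note: the core_major/core_minor checks above only
	 * set feature flags; the DLL initialisation and HS400 calibration
	 * code earlier in this file uses them to choose between the
	 * CDC LP 533 and CM DLL paths, the 14lpp-style DLL reset and the
	 * updated DDR_CONFIG value.
	 */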

	/*
	 * Power on reset state may trigger power irq if previous status of
	 * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
	 * interrupt in GIC, any pending power irq interrupt should be
	 * acknowledged. Otherwise power irq interrupt handler would be
	 * fired prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that above writes are propagated before interrupt enablement
	 * in GIC.
	 */
	mb();

	/* Setup IRQ for handling power/voltage tasks with PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	msm_host_writel(msm_host, INT_MASK, host,
			msm_offset->core_pwrctl_mask);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
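	/*
	 * Informational note: the get_noresume/set_active/enable sequence
	 * above publishes the device to runtime PM while holding a usage
	 * count so it cannot suspend during host registration; the matching
	 * mark_last_busy/put_autosuspend after sdhci_add_host() drops that
	 * count and arms the autosuspend timer
	 * (MSM_MMC_AUTOSUSPEND_DELAY_MS).
	 */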

	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	if (of_property_read_bool(node, "supports-cqe"))
		ret = sdhci_msm_cqe_add_host(host, pdev);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;
	sdhci_msm_set_regulator_caps(msm_host);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
opp_cleanup:
	if (msm_host->has_opp_table)
		dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_clkname(msm_host->opp_table);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}

static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	if (msm_host->has_opp_table)
		dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_clkname(msm_host->opp_table);
	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	sdhci_pltfm_free(pdev);
	return 0;
}

static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/* Drop the performance vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}

static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		return ret;
	/*
	 * Whenever the core clock is gated dynamically, the SDR DLL settings
	 * need to be restored when the clock is ungated again.
	 */
	if (msm_host->restore_dll_config && msm_host->clk_rate)
		ret = sdhci_msm_restore_sdr_dll_config(host);

	dev_pm_opp_set_rate(dev, msm_host->clk_rate);

	return ret;
}
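
/*
 * Informational note: system sleep reuses the runtime PM callbacks above
 * through pm_runtime_force_suspend()/pm_runtime_force_resume(), so a
 * suspended system and a runtime-suspended controller go through the same
 * clock and OPP teardown/restore paths.
 */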
static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");