// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>

#include "sdhci-pltfm.h"
#include "cqhci.h"

#define CORE_MCI_VERSION                0x50
#define CORE_VERSION_MAJOR_SHIFT        28
#define CORE_VERSION_MAJOR_MASK         (0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK         0xff

#define CORE_MCI_GENERICS               0x70
#define SWITCHABLE_SIGNALING_VOLTAGE    BIT(29)

#define HC_MODE_EN              0x1
#define CORE_POWER              0x0
#define CORE_SW_RST             BIT(7)
#define FF_CLK_SW_RST_DIS       BIT(13)

#define CORE_PWRCTL_BUS_OFF     BIT(0)
#define CORE_PWRCTL_BUS_ON      BIT(1)
#define CORE_PWRCTL_IO_LOW      BIT(2)
#define CORE_PWRCTL_IO_HIGH     BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
#define CORE_PWRCTL_IO_SUCCESS  BIT(2)
#define REQ_BUS_OFF             BIT(0)
#define REQ_BUS_ON              BIT(1)
#define REQ_IO_LOW              BIT(2)
#define REQ_IO_HIGH             BIT(3)
#define INT_MASK                0xf
#define MAX_PHASES              16
#define CORE_DLL_LOCK           BIT(7)
#define CORE_DDR_DLL_LOCK       BIT(11)
#define CORE_DLL_EN             BIT(16)
#define CORE_CDR_EN             BIT(17)
#define CORE_CK_OUT_EN          BIT(18)
#define CORE_CDR_EXT_EN         BIT(19)
#define CORE_DLL_PDN            BIT(29)
#define CORE_DLL_RST            BIT(30)
#define CORE_CMD_DAT_TRACK_SEL  BIT(0)

#define CORE_DDR_CAL_EN         BIT(0)
#define CORE_FLL_CYCLE_CNT      BIT(18)
#define CORE_DLL_CLOCK_DISABLE  BIT(21)

#define CORE_VENDOR_SPEC_POR_VAL 0xa9c
#define CORE_CLK_PWRSAVE        BIT(1)
#define CORE_HC_MCLK_SEL_DFLT   (2 << 8)
#define CORE_HC_MCLK_SEL_HS400  (3 << 8)
#define CORE_HC_MCLK_SEL_MASK   (3 << 8)
#define CORE_IO_PAD_PWR_SWITCH_EN       BIT(15)
#define CORE_IO_PAD_PWR_SWITCH  BIT(16)
#define CORE_HC_SELECT_IN_EN    BIT(18)
#define CORE_HC_SELECT_IN_HS400 (6 << 19)
#define CORE_HC_SELECT_IN_MASK  (7 << 19)

#define CORE_3_0V_SUPPORT       BIT(25)
#define CORE_1_8V_SUPPORT       BIT(26)
#define CORE_VOLT_SUPPORT       (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)

#define CORE_CSR_CDC_CTLR_CFG0          0x130
#define CORE_SW_TRIG_FULL_CALIB         BIT(16)
#define CORE_HW_AUTOCAL_ENA             BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1          0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0     0x138
#define CORE_TIMER_ENA                  BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1     0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG       0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG     0x144
#define CORE_CDC_OFFSET_CFG             0x14C
#define CORE_CSR_CDC_DELAY_CFG          0x150
#define CORE_CDC_SLAVE_DDA_CFG          0x160
#define CORE_CSR_CDC_STATUS0            0x164
#define CORE_CALIBRATION_DONE           BIT(0)

#define CORE_CDC_ERROR_CODE_MASK        0x7000000

#define CORE_CSR_CDC_GEN_CFG            0x178
#define CORE_CDC_SWITCH_BYPASS_OFF      BIT(0)
#define CORE_CDC_SWITCH_RC_EN           BIT(1)

#define CORE_CDC_T4_DLY_SEL             BIT(0)
#define CORE_CMDIN_RCLK_EN              BIT(1)
#define CORE_START_CDC_TRAFFIC          BIT(6)

#define CORE_PWRSAVE_DLL        BIT(3)

#define DDR_CONFIG_POR_VAL      0x80040873


#define INVALID_TUNING_PHASE    -1
#define SDHCI_MSM_MIN_CLOCK     400000
#define CORE_FREQ_100MHZ        (100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT        20
#define CDR_SELEXT_MASK         (0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT  24
#define CMUX_SHIFT_PHASE_MASK   (7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS    50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

#define msm_host_readl(msm_host, host, offset) \
        msm_host->var_ops->msm_readl_relaxed(host, offset)

#define msm_host_writel(msm_host, val, host, offset) \
        msm_host->var_ops->msm_writel_relaxed(val, host, offset)

/* CQHCI vendor specific registers */
#define CQHCI_VENDOR_CFG1       0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN   (0x3 << 13)

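/*
 * Per-variant register offsets: pre-v5 controllers expose these registers
 * in the separate MCI (core_mem) region, while v5 and later map them into
 * the SDHCI register space itself (see sdhci_msm_v5_offset and
 * sdhci_msm_mci_offset below).
 */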
struct sdhci_msm_offset {
        u32 core_hc_mode;
        u32 core_mci_data_cnt;
        u32 core_mci_status;
        u32 core_mci_fifo_cnt;
        u32 core_mci_version;
        u32 core_generics;
        u32 core_testbus_config;
        u32 core_testbus_sel2_bit;
        u32 core_testbus_ena;
        u32 core_testbus_sel2;
        u32 core_pwrctl_status;
        u32 core_pwrctl_mask;
        u32 core_pwrctl_clear;
        u32 core_pwrctl_ctl;
        u32 core_sdcc_debug_reg;
        u32 core_dll_config;
        u32 core_dll_status;
        u32 core_vendor_spec;
        u32 core_vendor_spec_adma_err_addr0;
        u32 core_vendor_spec_adma_err_addr1;
        u32 core_vendor_spec_func2;
        u32 core_vendor_spec_capabilities0;
        u32 core_ddr_200_cfg;
        u32 core_vendor_spec3;
        u32 core_dll_config_2;
        u32 core_dll_config_3;
        u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
        u32 core_ddr_config;
};

static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
        .core_mci_data_cnt = 0x35c,
        .core_mci_status = 0x324,
        .core_mci_fifo_cnt = 0x308,
        .core_mci_version = 0x318,
        .core_generics = 0x320,
        .core_testbus_config = 0x32c,
        .core_testbus_sel2_bit = 3,
        .core_testbus_ena = (1 << 31),
        .core_testbus_sel2 = (1 << 3),
        .core_pwrctl_status = 0x240,
        .core_pwrctl_mask = 0x244,
        .core_pwrctl_clear = 0x248,
        .core_pwrctl_ctl = 0x24c,
        .core_sdcc_debug_reg = 0x358,
        .core_dll_config = 0x200,
        .core_dll_status = 0x208,
        .core_vendor_spec = 0x20c,
        .core_vendor_spec_adma_err_addr0 = 0x214,
        .core_vendor_spec_adma_err_addr1 = 0x218,
        .core_vendor_spec_func2 = 0x210,
        .core_vendor_spec_capabilities0 = 0x21c,
        .core_ddr_200_cfg = 0x224,
        .core_vendor_spec3 = 0x250,
        .core_dll_config_2 = 0x254,
        .core_dll_config_3 = 0x258,
        .core_ddr_config = 0x25c,
};

static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
        .core_hc_mode = 0x78,
        .core_mci_data_cnt = 0x30,
        .core_mci_status = 0x34,
        .core_mci_fifo_cnt = 0x44,
        .core_mci_version = 0x050,
        .core_generics = 0x70,
        .core_testbus_config = 0x0cc,
        .core_testbus_sel2_bit = 4,
        .core_testbus_ena = (1 << 3),
        .core_testbus_sel2 = (1 << 4),
        .core_pwrctl_status = 0xdc,
        .core_pwrctl_mask = 0xe0,
        .core_pwrctl_clear = 0xe4,
        .core_pwrctl_ctl = 0xe8,
        .core_sdcc_debug_reg = 0x124,
        .core_dll_config = 0x100,
        .core_dll_status = 0x108,
        .core_vendor_spec = 0x10c,
        .core_vendor_spec_adma_err_addr0 = 0x114,
        .core_vendor_spec_adma_err_addr1 = 0x118,
        .core_vendor_spec_func2 = 0x110,
        .core_vendor_spec_capabilities0 = 0x11c,
        .core_ddr_200_cfg = 0x184,
        .core_vendor_spec3 = 0x1b0,
        .core_dll_config_2 = 0x1b4,
        .core_ddr_config_old = 0x1b8,
        .core_ddr_config = 0x1bc,
};

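/*
 * Register accessors that differ between controller generations: pre-v5
 * parts reach the vendor registers through the MCI region, while v5+
 * parts use the SDHCI register space directly.
 */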
struct sdhci_msm_variant_ops {
        u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
        void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
                        u32 offset);
};

/*
 * From V5 onwards, the register spaces have changed. Wrap this info in a
 * structure and choose the matching data structure based on the version
 * info given in DT.
 */
struct sdhci_msm_variant_info {
        bool mci_removed;
        bool restore_dll_config;
        const struct sdhci_msm_variant_ops *var_ops;
        const struct sdhci_msm_offset *offset;
};

struct sdhci_msm_host {
        struct platform_device *pdev;
        void __iomem *core_mem; /* MSM SDCC mapped address */
        int pwr_irq;            /* power irq */
        struct clk *bus_clk;    /* SDHC bus voter clock */
        struct clk *xo_clk;     /* TCXO clk needed for FLL feature of cm_dll*/
        struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
        unsigned long clk_rate;
        struct mmc_host *mmc;
        struct opp_table *opp_table;
        bool has_opp_table;
        bool use_14lpp_dll_reset;
        bool tuning_done;
        bool calibration_done;
        u8 saved_tuning_phase;
        bool use_cdclp533;
        u32 curr_pwr_state;
        u32 curr_io_level;
        wait_queue_head_t pwr_irq_wait;
        bool pwr_irq_flag;
        u32 caps_0;
        bool mci_removed;
        bool restore_dll_config;
        const struct sdhci_msm_variant_ops *var_ops;
        const struct sdhci_msm_offset *offset;
        bool use_cdr;
        u32 transfer_mode;
        bool updated_ddr_cfg;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        return msm_host->offset;
}

/*
 * APIs to read/write to vendor specific registers which were there in the
 * core_mem region before MCI was removed.
 */
static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
                u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        return readl_relaxed(msm_host->core_mem + offset);
}

static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
                u32 offset)
{
        return readl_relaxed(host->ioaddr + offset);
}

static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
                struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        writel_relaxed(val, msm_host->core_mem + offset);
}

static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
                struct sdhci_host *host, u32 offset)
{
        writel_relaxed(val, host->ioaddr + offset);
}

static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
                                                    unsigned int clock)
{
        struct mmc_ios ios = host->mmc->ios;
        /*
         * The SDHC requires the internal clock frequency to be double the
         * actual clock that will be set for DDR mode. The controller
         * uses the faster clock (100/400MHz) for some of its parts and
         * sends the actual required clock (50/200MHz) to the card.
         */
        if (ios.timing == MMC_TIMING_UHS_DDR50 ||
            ios.timing == MMC_TIMING_MMC_DDR52 ||
            ios.timing == MMC_TIMING_MMC_HS400 ||
            host->flags & SDHCI_HS400_TUNING)
                clock *= 2;
        return clock;
}

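/*
 * Set the controller core clock through the OPP framework so that any
 * required performance-state vote is applied together with the rate; for
 * the DDR modes handled above, the requested rate is twice the bus clock.
 */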
static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
                                            unsigned int clock)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_ios curr_ios = host->mmc->ios;
        struct clk *core_clk = msm_host->bulk_clks[0].clk;
        int rc;

        clock = msm_get_clock_rate_for_bus_mode(host, clock);
        rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock);
        if (rc) {
                pr_err("%s: Failed to set clock at rate %u at timing %d\n",
                       mmc_hostname(host->mmc), clock,
                       curr_ios.timing);
                return;
        }
        msm_host->clk_rate = clock;
        pr_debug("%s: Setting clock at rate %lu at timing %d\n",
                 mmc_hostname(host->mmc), clk_get_rate(core_clk),
                 curr_ios.timing);
}

/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
        u32 wait_cnt = 50;
        u8 ck_out_en;
        struct mmc_host *mmc = host->mmc;
        const struct sdhci_msm_offset *msm_offset =
                                        sdhci_priv_msm_offset(host);

        /* Poll for CK_OUT_EN bit.  max. poll time = 50us */
        ck_out_en = !!(readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config) & CORE_CK_OUT_EN);

        while (ck_out_en != poll) {
                if (--wait_cnt == 0) {
                        dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
                               mmc_hostname(mmc), poll);
                        return -ETIMEDOUT;
                }
                udelay(1);

                ck_out_en = !!(readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config) & CORE_CK_OUT_EN);
        }

        return 0;
}

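/*
 * Program one of the 16 DLL output phases into the CDR_SELEXT field,
 * following the required handshake: disable CDR and CK_OUT_EN, wait for
 * CK_OUT_EN to clear, write the grey-coded phase, then re-enable
 * CK_OUT_EN and CDR.
 */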
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
        int rc;
        static const u8 grey_coded_phase_table[] = {
                0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
                0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
        };
        unsigned long flags;
        u32 config;
        struct mmc_host *mmc = host->mmc;
        const struct sdhci_msm_offset *msm_offset =
                                        sdhci_priv_msm_offset(host);

        if (phase > 0xf)
                return -EINVAL;

        spin_lock_irqsave(&host->lock, flags);

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
        config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
        rc = msm_dll_poll_ck_out_en(host, 0);
        if (rc)
                goto err_out;

        /*
         * Write the selected DLL clock output phase (0 ... 15)
         * to CDR_SELEXT bit field of DLL_CONFIG register.
         */
        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config &= ~CDR_SELEXT_MASK;
        config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config |= CORE_CK_OUT_EN;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
        rc = msm_dll_poll_ck_out_en(host, 1);
        if (rc)
                goto err_out;

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config |= CORE_CDR_EN;
        config &= ~CORE_CDR_EXT_EN;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
        goto out;

err_out:
        dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
               mmc_hostname(mmc), phase);
out:
        spin_unlock_irqrestore(&host->lock, flags);
        return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
                                           u8 *phase_table, u8 total_phases)
{
        int ret;
        u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
        u8 phases_per_row[MAX_PHASES] = { 0 };
        int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
        int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
        bool phase_0_found = false, phase_15_found = false;
        struct mmc_host *mmc = host->mmc;

        if (!total_phases || (total_phases > MAX_PHASES)) {
                dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
                       mmc_hostname(mmc), total_phases);
                return -EINVAL;
        }

        for (cnt = 0; cnt < total_phases; cnt++) {
                ranges[row_index][col_index] = phase_table[cnt];
                phases_per_row[row_index] += 1;
                col_index++;

                if ((cnt + 1) == total_phases) {
                        continue;
                /* check if next phase in phase_table is consecutive or not */
                } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
                        row_index++;
                        col_index = 0;
                }
        }

        if (row_index >= MAX_PHASES)
                return -EINVAL;

        /* Check if phase-0 is present in the first valid window */
        if (!ranges[0][0]) {
                phase_0_found = true;
                phase_0_raw_index = 0;
                /* Check if a cycle exists between the 2 valid windows */
                for (cnt = 1; cnt <= row_index; cnt++) {
                        if (phases_per_row[cnt]) {
                                for (i = 0; i < phases_per_row[cnt]; i++) {
                                        if (ranges[cnt][i] == 15) {
                                                phase_15_found = true;
                                                phase_15_raw_index = cnt;
                                                break;
                                        }
                                }
                        }
                }
        }

        /* If the 2 valid windows form a cycle, merge them into a single window */
        if (phase_0_found && phase_15_found) {
                /* number of phases in the row where phase 0 is present */
                u8 phases_0 = phases_per_row[phase_0_raw_index];
                /* number of phases in the row where phase 15 is present */
                u8 phases_15 = phases_per_row[phase_15_raw_index];

                if (phases_0 + phases_15 >= MAX_PHASES)
                        /*
                         * If there is more than one phase window, the total
                         * number of phases in both windows must be less
                         * than MAX_PHASES.
                         */
                        return -EINVAL;

                /* Merge 2 cyclic windows */
                i = phases_15;
                for (cnt = 0; cnt < phases_0; cnt++) {
                        ranges[phase_15_raw_index][i] =
                            ranges[phase_0_raw_index][cnt];
                        if (++i >= MAX_PHASES)
                                break;
                }

                phases_per_row[phase_0_raw_index] = 0;
                phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
        }

        for (cnt = 0; cnt <= row_index; cnt++) {
                if (phases_per_row[cnt] > curr_max) {
                        curr_max = phases_per_row[cnt];
                        selected_row_index = cnt;
                }
        }

        i = (curr_max * 3) / 4;
        if (i)
                i--;

        ret = ranges[selected_row_index][i];

        if (ret >= MAX_PHASES) {
                ret = -EINVAL;
                dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
                       mmc_hostname(mmc), ret);
        }

        return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
        u32 mclk_freq = 0, config;
        const struct sdhci_msm_offset *msm_offset =
                                        sdhci_priv_msm_offset(host);

        /* Program the MCLK value to MCLK_FREQ bit field */
        if (host->clock <= 112000000)
                mclk_freq = 0;
        else if (host->clock <= 125000000)
                mclk_freq = 1;
        else if (host->clock <= 137000000)
                mclk_freq = 2;
        else if (host->clock <= 150000000)
                mclk_freq = 3;
        else if (host->clock <= 162000000)
                mclk_freq = 4;
        else if (host->clock <= 175000000)
                mclk_freq = 5;
        else if (host->clock <= 187000000)
                mclk_freq = 6;
        else if (host->clock <= 200000000)
                mclk_freq = 7;

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config &= ~CMUX_SHIFT_PHASE_MASK;
        config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int wait_cnt = 50;
        unsigned long flags, xo_clk = 0;
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
                xo_clk = clk_get_rate(msm_host->xo_clk);

        spin_lock_irqsave(&host->lock, flags);

        /*
         * Make sure that clock is always enabled when DLL
         * tuning is in progress. Keeping PWRSAVE ON may
         * turn off the clock.
         */
        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_CLK_PWRSAVE;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

        if (msm_host->use_14lpp_dll_reset) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config &= ~CORE_CK_OUT_EN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config |= CORE_DLL_CLOCK_DISABLE;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_2);
        }

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_DLL_RST;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_DLL_PDN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);
        msm_cm_dll_set_freq(host);

        if (msm_host->use_14lpp_dll_reset &&
            !IS_ERR_OR_NULL(msm_host->xo_clk)) {
                u32 mclk_freq = 0;

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config &= CORE_FLL_CYCLE_CNT;
                if (config)
                        mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
                                        xo_clk);
                else
                        mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
                                        xo_clk);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config &= ~(0xFF << 10);
                config |= mclk_freq << 10;

                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_2);
                /* wait for 5us before enabling DLL clock */
                udelay(5);
        }

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config &= ~CORE_DLL_RST;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config &= ~CORE_DLL_PDN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        if (msm_host->use_14lpp_dll_reset) {
                msm_cm_dll_set_freq(host);
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config &= ~CORE_DLL_CLOCK_DISABLE;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_2);
        }

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_DLL_EN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_CK_OUT_EN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
        while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
                 CORE_DLL_LOCK)) {
                /* max. wait of 50us for the LOCK bit to be set */
                if (--wait_cnt == 0) {
                        dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
                               mmc_hostname(mmc));
                        spin_unlock_irqrestore(&host->lock, flags);
                        return -ETIMEDOUT;
                }
                udelay(1);
        }

        spin_unlock_irqrestore(&host->lock, flags);
        return 0;
}

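/*
 * Switch the DLL input back to the default free running MCLK and hand
 * mode selection back to the standard UHS field in Host Control2 by
 * clearing HC_SELECT_IN/HC_SELECT_IN_EN.
 */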
static void msm_hc_select_default(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        if (!msm_host->use_cdclp533) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec3);
                config &= ~CORE_PWRSAVE_DLL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_vendor_spec3);
        }

        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_HC_MCLK_SEL_MASK;
        config |= CORE_HC_MCLK_SEL_DFLT;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

        /*
         * Disable HC_SELECT_IN to be able to use the UHS mode select
         * configuration from Host Control2 register for all other
         * modes.
         * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
         * in VENDOR_SPEC_FUNC
         */
        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_HC_SELECT_IN_EN;
        config &= ~CORE_HC_SELECT_IN_MASK;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

        /*
         * Make sure above writes impacting free running MCLK are completed
         * before changing the clk_rate at GCC.
         */
        wmb();
}

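/*
 * Select HS400 mode: feed the DLL from the divided clock (MCLK/2) and,
 * once tuning or enhanced strobe is ready, force HS400 through the vendor
 * specific HC_SELECT_IN field instead of Host Control2.
 */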
static void msm_hc_select_hs400(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_ios ios = host->mmc->ios;
        u32 config, dll_lock;
        int rc;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        /* Select the divided clock (free running MCLK/2) */
        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_HC_MCLK_SEL_MASK;
        config |= CORE_HC_MCLK_SEL_HS400;

        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
        /*
         * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
         * register
         */
        if ((msm_host->tuning_done || ios.enhanced_strobe) &&
            !msm_host->calibration_done) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec);
                config |= CORE_HC_SELECT_IN_HS400;
                config |= CORE_HC_SELECT_IN_EN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_vendor_spec);
        }
        if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
                /*
                 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
                 * core_dll_status to be set. This should get set
                 * within 15 us at 200 MHz.
                 */
                rc = readl_relaxed_poll_timeout(host->ioaddr +
                                                msm_offset->core_dll_status,
                                                dll_lock,
                                                (dll_lock &
                                                (CORE_DLL_LOCK |
                                                CORE_DDR_DLL_LOCK)), 10,
                                                1000);
                if (rc == -ETIMEDOUT)
                        pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
                               mmc_hostname(host->mmc), dll_lock);
        }
        /*
         * Make sure above writes impacting free running MCLK are completed
         * before changing the clk_rate at GCC.
         */
        wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general, all timing modes are
 * controlled via the UHS mode select in the Host Control2 register.
 * The eMMC specific HS200/HS400 timings don't have their own mode
 * encodings defined here, hence we use these values:
 *
 * HS200 - SDR104 (since both are equivalent in functionality)
 * HS400 - This involves multiple configurations
 *              Initially SDR104 - when tuning is required as HS200
 *              Then when switching to DDR @ 400MHz (HS400) we use
 *              the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for the DLL depending on the mode:
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
        struct mmc_ios ios = host->mmc->ios;

        if (ios.timing == MMC_TIMING_MMC_HS400 ||
            host->flags & SDHCI_HS400_TUNING)
                msm_hc_select_hs400(host);
        else
                msm_hc_select_default(host);
}

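/*
 * Calibrate the CDCLP533 delay circuit used by the earlier HS400 capable
 * controllers. The fixed CFG values written below appear to be the
 * required CDC register initialization sequence for this hardware.
 */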
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        u32 config, calib_done;
        int ret;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

        /*
         * Retuning in HS400 (DDR mode) will fail, just reset the
         * tuning block and restore the saved tuning phase.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        /* Set the selected phase in delay line hw block */
        ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
        if (ret)
                goto out;

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config |= CORE_CMD_DAT_TRACK_SEL;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
        config &= ~CORE_CDC_T4_DLY_SEL;
        writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
        config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
        config |= CORE_CDC_SWITCH_RC_EN;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
        config &= ~CORE_START_CDC_TRAFFIC;
        writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

        /* Perform CDC Register Initialization Sequence */

        writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
        writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
        writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
        writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
        writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
        writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
        writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
        writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

        /* CDC HW Calibration */

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        config |= CORE_SW_TRIG_FULL_CALIB;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        config &= ~CORE_SW_TRIG_FULL_CALIB;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        config |= CORE_HW_AUTOCAL_ENA;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
        config |= CORE_TIMER_ENA;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

        ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
                                         calib_done,
                                         (calib_done & CORE_CALIBRATION_DONE),
                                         1, 50);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CDC calibration was not completed\n",
                       mmc_hostname(host->mmc), __func__);
                goto out;
        }

        ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
                        & CORE_CDC_ERROR_CODE_MASK;
        if (ret) {
                pr_err("%s: %s: CDC error code %d\n",
                       mmc_hostname(host->mmc), __func__, ret);
                ret = -EINVAL;
                goto out;
        }

        config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
        config |= CORE_START_CDC_TRAFFIC;
        writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
        pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
                 __func__, ret);
        return ret;
}

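/*
 * DDR DLL calibration path for controllers without CDCLP533: reprogram
 * the POR value into core_ddr_config, kick CORE_DDR_CAL_EN and wait for
 * DDR_DLL_LOCK.
 */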
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        u32 dll_status, config, ddr_cfg_offset;
        int ret;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        const struct sdhci_msm_offset *msm_offset =
                                        sdhci_priv_msm_offset(host);

        pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

        /*
         * The core_ddr_config register currently defaults to the desired
         * configuration on reset. Reprogram the power-on reset (POR) value
         * here in case a bootloader has modified it. If this default ever
         * changes, the desired values will need to be programmed
         * appropriately.
         */
        if (msm_host->updated_ddr_cfg)
                ddr_cfg_offset = msm_offset->core_ddr_config;
        else
                ddr_cfg_offset = msm_offset->core_ddr_config_old;
        writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);

        if (mmc->ios.enhanced_strobe) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_ddr_200_cfg);
                config |= CORE_CMDIN_RCLK_EN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_ddr_200_cfg);
        }

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
        config |= CORE_DDR_CAL_EN;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

        ret = readl_relaxed_poll_timeout(host->ioaddr +
                                        msm_offset->core_dll_status,
                                        dll_status,
                                        (dll_status & CORE_DDR_DLL_LOCK),
                                        10, 1000);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
                       mmc_hostname(host->mmc), __func__);
                goto out;
        }

        /*
         * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
         * When MCLK is gated OFF, it is not gated for less than 0.5us
         * and MCLK must be switched on for at-least 1us before DATA
         * starts coming. Controllers with 14lpp and later tech DLL cannot
         * guarantee above requirement. So PWRSAVE_DLL should not be
         * turned on for host controllers using this DLL.
         */
        if (!msm_host->use_14lpp_dll_reset) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec3);
                config |= CORE_PWRSAVE_DLL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_vendor_spec3);
        }

        /*
         * Drain writebuffer to ensure above DLL calibration
         * and PWRSAVE DLL is enabled.
         */
        wmb();
out:
        pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
                 __func__, ret);
        return ret;
}

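/*
 * HS400 DLL calibration entry point: reset the tuning block, restore the
 * saved phase (unless enhanced strobe is used), then run either the
 * CDCLP533 or the CM_DLL_SDC4 calibration depending on the controller.
 */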
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_host *mmc = host->mmc;
        int ret;
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

        /*
         * Retuning in HS400 (DDR mode) will fail, just reset the
         * tuning block and restore the saved tuning phase.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        if (!mmc->ios.enhanced_strobe) {
                /* Set the selected phase in delay line hw block */
                ret = msm_config_cm_dll_phase(host,
                                              msm_host->saved_tuning_phase);
                if (ret)
                        goto out;
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config |= CORE_CMD_DAT_TRACK_SEL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);
        }

        if (msm_host->use_cdclp533)
                ret = sdhci_msm_cdclp533_calibration(host);
        else
                ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
        pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
                 __func__, ret);
        return ret;
}

static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
        struct mmc_ios *ios = &host->mmc->ios;

        /*
         * Tuning is required for SDR104, HS200 and HS400 modes, and only
         * when the clock frequency is greater than 100MHz in these modes.
         */
        if (host->clock <= CORE_FREQ_100MHZ ||
            !(ios->timing == MMC_TIMING_MMC_HS400 ||
            ios->timing == MMC_TIMING_MMC_HS200 ||
            ios->timing == MMC_TIMING_UHS_SDR104) ||
            ios->enhanced_strobe)
                return false;

        return true;
}

static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int ret;

        /*
         * The SDR DLL only comes into the picture for timing modes which
         * need tuning.
         */
        if (!sdhci_msm_is_tuning_needed(host))
                return 0;

        /* Reset the tuning block */
        ret = msm_init_cm_dll(host);
        if (ret)
                return ret;

        /* Restore the tuning block */
        ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

        return ret;
}

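/*
 * Enable or disable the clock-data-recovery block. With CDR disabled,
 * the sampling point comes from the externally programmed (tuned) phase
 * via CDR_EXT_EN instead.
 */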
static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
        const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
        u32 config, oldconfig = readl_relaxed(host->ioaddr +
                                              msm_offset->core_dll_config);

        config = oldconfig;
        if (enable) {
                config |= CORE_CDR_EN;
                config &= ~CORE_CDR_EXT_EN;
        } else {
                config &= ~CORE_CDR_EN;
                config |= CORE_CDR_EXT_EN;
        }

        if (config != oldconfig) {
                writel_relaxed(config, host->ioaddr +
                               msm_offset->core_dll_config);
        }
}

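/*
 * Platform tuning: sweep all 16 DLL output phases, issue a tuning command
 * at each one, then pick the phase at the 3/4 point of the longest
 * contiguous passing window (retrying the whole sequence up to 3 times).
 */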
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct sdhci_host *host = mmc_priv(mmc);
        int tuning_seq_cnt = 3;
        u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
        int rc;
        struct mmc_ios ios = host->mmc->ios;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        if (!sdhci_msm_is_tuning_needed(host)) {
                msm_host->use_cdr = false;
                sdhci_msm_set_cdr(host, false);
                return 0;
        }

        /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
        msm_host->use_cdr = true;

        /*
         * HS400 tuning, done in HS200 timing, requires:
         * - selecting MCLK/2 in VENDOR_SPEC
         * - programming MCLK to 400MHz (or the nearest supported) in GCC
         */
        if (host->flags & SDHCI_HS400_TUNING) {
                sdhci_msm_hc_select_mode(host);
                msm_set_clock_rate_for_bus_mode(host, ios.clock);
                host->flags &= ~SDHCI_HS400_TUNING;
        }

retry:
        /* First of all reset the tuning block */
        rc = msm_init_cm_dll(host);
        if (rc)
                return rc;

        phase = 0;
        do {
                /* Set the phase in delay line hw block */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        return rc;

                rc = mmc_send_tuning(mmc, opcode, NULL);
                if (!rc) {
                        /* Tuning is successful at this tuning point */
                        tuned_phases[tuned_phase_cnt++] = phase;
                        dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
                                 mmc_hostname(mmc), phase);
                }
        } while (++phase < ARRAY_SIZE(tuned_phases));

        if (tuned_phase_cnt) {
                rc = msm_find_most_appropriate_phase(host, tuned_phases,
                                                     tuned_phase_cnt);
                if (rc < 0)
                        return rc;
                else
                        phase = rc;

                /*
                 * Finally set the selected phase in delay
                 * line hw block.
                 */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        return rc;
                msm_host->saved_tuning_phase = phase;
                dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
                         mmc_hostname(mmc), phase);
        } else {
                if (--tuning_seq_cnt)
                        goto retry;
                /* Tuning failed */
                dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
                       mmc_hostname(mmc));
                rc = -EIO;
        }

        if (!rc)
                msm_host->tuning_done = true;
        return rc;
}

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * a fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int ret;

        if (host->clock > CORE_FREQ_100MHZ &&
            (msm_host->tuning_done || ios->enhanced_strobe) &&
            !msm_host->calibration_done) {
                ret = sdhci_msm_hs400_dll_calibration(host);
                if (!ret)
                        msm_host->calibration_done = true;
                else
                        pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
                               mmc_hostname(host->mmc), ret);
        }
}

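/*
 * Map the MMC timing mode onto the SDHCI UHS mode select field; HS200 and
 * HS400 reuse the SDR104 encoding. Below 100MHz the DLL is also held in
 * reset/power-down so that the fixed feedback clock is used.
 */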
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
                                        unsigned int uhs)
{
        struct mmc_host *mmc = host->mmc;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        u16 ctrl_2;
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        /* Select Bus Speed Mode for host */
        ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
        switch (uhs) {
        case MMC_TIMING_UHS_SDR12:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
                break;
        case MMC_TIMING_UHS_SDR25:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
                break;
        case MMC_TIMING_UHS_SDR50:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
                break;
        case MMC_TIMING_MMC_HS400:
        case MMC_TIMING_MMC_HS200:
        case MMC_TIMING_UHS_SDR104:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
                break;
        case MMC_TIMING_UHS_DDR50:
        case MMC_TIMING_MMC_DDR52:
                ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
                break;
        }

        /*
         * When clock frequency is less than 100MHz, the feedback clock must be
         * provided and DLL must not be used so that tuning can be skipped. To
         * provide feedback clock, the mode selection can be any value less
         * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
         */
        if (host->clock <= CORE_FREQ_100MHZ) {
                if (uhs == MMC_TIMING_MMC_HS400 ||
                    uhs == MMC_TIMING_MMC_HS200 ||
                    uhs == MMC_TIMING_UHS_SDR104)
                        ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
                /*
                 * The DLL is not required for clock <= 100MHz,
                 * so make sure it is disabled when not required.
                 */
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config |= CORE_DLL_RST;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config |= CORE_DLL_PDN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);

                /*
                 * The DLL needs to be restored and CDCLP533 recalibrated
                 * when the clock frequency is set back to 400MHz.
                 */
                msm_host->calibration_done = false;
        }

        dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
                mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
        sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

        if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
                sdhci_msm_hs400(host, &mmc->ios);
}

static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
        init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
                struct sdhci_msm_host *msm_host)
{
        wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status API should be called when register writes
 * which can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW
 * happen. The state to which the register writes will change the IO lines
 * should be passed as the argument req_type. This API checks whether the
 * IO line's state is already the expected state and waits for the power
 * irq only if the power irq is expected to be triggered based on the
 * current and expected IO line states.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        bool done = false;
        u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
                        mmc_hostname(host->mmc), __func__, req_type,
                        msm_host->curr_pwr_state, msm_host->curr_io_level);

        /*
         * The power interrupt will not be generated for signal voltage
         * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
         * Since sdhci-msm-v5, this bit has been removed and SW must consider
         * it as always set.
         */
        if (!msm_host->mci_removed)
                val = msm_host_readl(msm_host, host,
                                msm_offset->core_generics);
        if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
            !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
                return;
        }

        /*
         * The IRQ for request type IO High/LOW will be generated when -
         * there is a state change in 1.8V enable bit (bit 3) of
         * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
         * which indicates 3.3V IO voltage. So, when MMC core layer tries
         * to set it to 3.3V before card detection happens, the
         * IRQ doesn't get triggered as there is no state change in this bit.
         * The driver already handles this case by changing the IO voltage
         * level to high as part of controller power up sequence. Hence, check
         * for host->pwr to handle a case where IO voltage high request is
         * issued even before controller power up.
         */
        if ((req_type & REQ_IO_HIGH) && !host->pwr) {
                pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
                                mmc_hostname(host->mmc), req_type);
                return;
        }
        if ((req_type & msm_host->curr_pwr_state) ||
                        (req_type & msm_host->curr_io_level))
                done = true;
        /*
         * This is needed here to handle cases where register writes will
         * not change the current bus state or io level of the controller.
1371          * In this case, no power irq will be triggered and we should
1372          * not wait.
1373          */
1374         if (!done) {
1375                 if (!wait_event_timeout(msm_host->pwr_irq_wait,
1376                                 msm_host->pwr_irq_flag,
1377                                 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
1378                         dev_warn(&msm_host->pdev->dev,
1379                                  "%s: pwr_irq for req: (%d) timed out\n",
1380                                  mmc_hostname(host->mmc), req_type);
1381         }
1382         pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
1383                         __func__, req_type);
1384 }
1385
1386 static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
1387 {
1388         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1389         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1390         const struct sdhci_msm_offset *msm_offset =
1391                                         msm_host->offset;
1392
1393         pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
1394                 mmc_hostname(host->mmc),
1395                 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
1396                 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
1397                 msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
1398 }
1399
1400 static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
1401 {
1402         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1403         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1404         u32 irq_status, irq_ack = 0;
1405         int retry = 10;
1406         u32 pwr_state = 0, io_level = 0;
1407         u32 config;
1408         const struct sdhci_msm_offset *msm_offset = msm_host->offset;
1409
1410         irq_status = msm_host_readl(msm_host, host,
1411                         msm_offset->core_pwrctl_status);
1412         irq_status &= INT_MASK;
1413
1414         msm_host_writel(msm_host, irq_status, host,
1415                         msm_offset->core_pwrctl_clear);
1416
1417         /*
1418          * There is a rare HW scenario where the first clear pulse could be
1419          * lost when the actual reset and the clear/read of the status register
1420          * happen at the same time. Hence, retry at least 10 times to make
1421          * sure status register is cleared. Otherwise, this will result in
1422          * a spurious power IRQ resulting in system instability.
1423          */
1424         while (irq_status & msm_host_readl(msm_host, host,
1425                                 msm_offset->core_pwrctl_status)) {
1426                 if (retry == 0) {
1427                         pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
1428                                         mmc_hostname(host->mmc), irq_status);
1429                         sdhci_msm_dump_pwr_ctrl_regs(host);
1430                         WARN_ON(1);
1431                         break;
1432                 }
1433                 msm_host_writel(msm_host, irq_status, host,
1434                         msm_offset->core_pwrctl_clear);
1435                 retry--;
1436                 udelay(10);
1437         }
1438
1439         /* Handle BUS ON/OFF */
1440         if (irq_status & CORE_PWRCTL_BUS_ON) {
1441                 pwr_state = REQ_BUS_ON;
1442                 io_level = REQ_IO_HIGH;
1443                 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
1444         }
1445         if (irq_status & CORE_PWRCTL_BUS_OFF) {
1446                 pwr_state = REQ_BUS_OFF;
1447                 io_level = REQ_IO_LOW;
1448                 irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
1449         }
1450         /* Handle IO LOW/HIGH */
1451         if (irq_status & CORE_PWRCTL_IO_LOW) {
1452                 io_level = REQ_IO_LOW;
1453                 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
1454         }
1455         if (irq_status & CORE_PWRCTL_IO_HIGH) {
1456                 io_level = REQ_IO_HIGH;
1457                 irq_ack |= CORE_PWRCTL_IO_SUCCESS;
1458         }
1459
1460         /*
1461          * The driver has to acknowledge the interrupt, switch voltages and
1462          * report back to this register whether it succeeded. The voltage
1463          * switches are handled by the sdhci core, so just report success.
1464          */
1465         msm_host_writel(msm_host, irq_ack, host,
1466                         msm_offset->core_pwrctl_ctl);
1467
1468         /*
1469          * If we don't have info regarding the voltage levels supported by
1470          * regulators, don't change the IO PAD PWR SWITCH.
1471          */
1472         if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
1473                 u32 new_config;
1474                 /*
1475                  * We should unset IO PAD PWR switch only if the register write
1476                  * can set IO lines high and the regulator also switches to 3.0 V.
1477                  * Else, we should keep the IO PAD PWR switch set.
1478                  * This is applicable to certain targets where eMMC vccq supply
1479                  * is only 1.8V. In such targets, even during REQ_IO_HIGH, the
1480                  * IO PAD PWR switch must be kept set to reflect actual
1481                  * regulator voltage. This way, during initialization of
1482                  * controllers with only 1.8V, we will set the IO PAD bit
1483                  * without waiting for a REQ_IO_LOW.
1484                  */
1485                 config = readl_relaxed(host->ioaddr +
1486                                 msm_offset->core_vendor_spec);
1487                 new_config = config;
1488
1489                 if ((io_level & REQ_IO_HIGH) &&
1490                                 (msm_host->caps_0 & CORE_3_0V_SUPPORT))
1491                         new_config &= ~CORE_IO_PAD_PWR_SWITCH;
1492                 else if ((io_level & REQ_IO_LOW) ||
1493                                 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
1494                         new_config |= CORE_IO_PAD_PWR_SWITCH;
1495
1496                 if (config ^ new_config)
1497                         writel_relaxed(new_config, host->ioaddr +
1498                                         msm_offset->core_vendor_spec);
1499         }
1500
1501         if (pwr_state)
1502                 msm_host->curr_pwr_state = pwr_state;
1503         if (io_level)
1504                 msm_host->curr_io_level = io_level;
1505
1506         pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
1507                 mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
1508                 irq_ack);
1509 }
1510
1511 static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
1512 {
1513         struct sdhci_host *host = (struct sdhci_host *)data;
1514         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1515         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1516
1517         sdhci_msm_handle_pwr_irq(host, irq);
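        /*
         * Set pwr_irq_flag before waking the waiter; wake_up() pairs with
         * wait_event_timeout() in sdhci_msm_check_power_status(), so the
         * waiter is guaranteed to observe the flag.
         */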
1518         msm_host->pwr_irq_flag = 1;
1519         sdhci_msm_complete_pwr_irq_wait(msm_host);
1520
1522         return IRQ_HANDLED;
1523 }
1524
1525 static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
1526 {
1527         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1528         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1529         struct clk *core_clk = msm_host->bulk_clks[0].clk;
1530
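        /*
         * clk_round_rate() with ULONG_MAX returns the highest rate the core
         * clock can provide, which serves as the host's maximum clock.
         */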
1531         return clk_round_rate(core_clk, ULONG_MAX);
1532 }
1533
1534 static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
1535 {
1536         return SDHCI_MSM_MIN_CLOCK;
1537 }
1538
1539 /**
1540  * __sdhci_msm_set_clock - sdhci_msm clock control.
1541  *
1542  * Description:
1543  * The MSM controller does not use an internal divider and
1544  * instead directly controls the GCC clock, as per
1545  * HW recommendation.
1546  */
1547 static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1548 {
1549         u16 clk;
1550         /*
1551          * Keep actual_clock as zero:
1552          * - since no divider is used, there is no need for actual_clock.
1553          * - the MSM controller uses SDCLK for the data timeout calculation;
1554          *   if actual_clock is zero, host->clock is used instead.
1555          */
1556         host->mmc->actual_clock = 0;
1557
1558         sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1559
1560         if (clock == 0)
1561                 return;
1562
1563         /*
1564          * The MSM controller does not use a clock divider.
1565          * Thus, read SDHCI_CLOCK_CONTROL and only enable the
1566          * clock, with no divider value programmed.
1567          */
1568         clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1569         sdhci_enable_clk(host, clk);
1570 }
1571
1572 /* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
1573 static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1574 {
1575         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1576         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1577
1578         if (!clock) {
1579                 msm_host->clk_rate = clock;
1580                 goto out;
1581         }
1582
1583         sdhci_msm_hc_select_mode(host);
1584
1585         msm_set_clock_rate_for_bus_mode(host, clock);
1586 out:
1587         __sdhci_msm_set_clock(host, clock);
1588 }
1589
1590 /*****************************************************************************\
1591  *                                                                           *
1592  * MSM Command Queue Engine (CQE)                                            *
1593  *                                                                           *
1594 \*****************************************************************************/
1595
1596 static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
1597 {
1598         int cmd_error = 0;
1599         int data_error = 0;
1600
1601         if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1602                 return intmask;
1603
1604         cqhci_irq(host->mmc, intmask, cmd_error, data_error);
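        /* CQE consumed the interrupt; leave nothing for the legacy handler. */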
1605         return 0;
1606 }
1607
1608 static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
1609 {
1610         struct sdhci_host *host = mmc_priv(mmc);
1611         unsigned long flags;
1612         u32 ctrl;
1613
1614         /*
1615          * When CQE is halted, the legacy SDHCI path operates only
1616          * on 16-byte descriptors in 64-bit mode.
1617          */
1618         if (host->flags & SDHCI_USE_64_BIT_DMA)
1619                 host->desc_sz = 16;
1620
1621         spin_lock_irqsave(&host->lock, flags);
1622
1623         /*
1624          * During CQE command transfers, the command complete bit gets latched.
1625          * So s/w should clear the command complete interrupt status when CQE is
1626          * either halted or disabled. Otherwise an unexpected SDHCI legacy
1627          * interrupt gets triggered when CQE is halted/disabled.
1628          */
1629         ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
1630         ctrl |= SDHCI_INT_RESPONSE;
1631         sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
1632         sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
1633
1634         spin_unlock_irqrestore(&host->lock, flags);
1635
1636         sdhci_cqe_disable(mmc, recovery);
1637 }
1638
1639 static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
1640         .enable         = sdhci_cqe_enable,
1641         .disable        = sdhci_msm_cqe_disable,
1642 };
1643
1644 static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
1645                                 struct platform_device *pdev)
1646 {
1647         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1648         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1649         struct cqhci_host *cq_host;
1650         bool dma64;
1651         u32 cqcfg;
1652         int ret;
1653
1654         /*
1655          * When CQE is halted, SDHC operates only on 16-byte ADMA descriptors.
1656          * So ensure the ADMA table is allocated for 16-byte descriptors.
1657          */
1658         if (host->caps & SDHCI_CAN_64BIT)
1659                 host->alloc_desc_sz = 16;
1660
1661         ret = sdhci_setup_host(host);
1662         if (ret)
1663                 return ret;
1664
1665         cq_host = cqhci_pltfm_init(pdev);
1666         if (IS_ERR(cq_host)) {
1667                 ret = PTR_ERR(cq_host);
1668                 dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
1669                 goto cleanup;
1670         }
1671
1672         msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1673         cq_host->ops = &sdhci_msm_cqhci_ops;
1674
1675         dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1676
1677         ret = cqhci_init(cq_host, host->mmc, dma64);
1678         if (ret) {
1679                 dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
1680                                 mmc_hostname(host->mmc), ret);
1681                 goto cleanup;
1682         }
1683
1684         /* Disable CQE reset due to the CQE enable signal */
1685         cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
1686         cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
1687         cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
1688
1689         /*
1690          * SDHC expects 12-byte ADMA descriptors until CQE is enabled.
1691          * So limit desc_sz to 12 so that the data commands sent during
1692          * card initialization (before CQE gets enabled) execute without
1693          * any issues.
1694          */
1695         if (host->flags & SDHCI_USE_64_BIT_DMA)
1696                 host->desc_sz = 12;
1697
1698         ret = __sdhci_add_host(host);
1699         if (ret)
1700                 goto cleanup;
1701
1702         dev_info(&pdev->dev, "%s: CQE init: success\n",
1703                         mmc_hostname(host->mmc));
1704         return ret;
1705
1706 cleanup:
1707         sdhci_cleanup_host(host);
1708         return ret;
1709 }
1710
1711 /*
1712  * Platform-specific register write functions. This is so that, if any
1713  * register write needs to be followed up by platform-specific actions,
1714  * they can be added here. These functions can go to sleep when writes
1715  * to certain registers are done.
1716  * These functions rely on sdhci_set_ios not using a spinlock.
1717  */
1718 static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
1719 {
1720         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1721         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1722         u32 req_type = 0;
1723
1724         switch (reg) {
1725         case SDHCI_HOST_CONTROL2:
1726                 req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
1727                         REQ_IO_HIGH;
1728                 break;
1729         case SDHCI_SOFTWARE_RESET:
1730                 if (host->pwr && (val & SDHCI_RESET_ALL))
1731                         req_type = REQ_BUS_OFF;
1732                 break;
1733         case SDHCI_POWER_CONTROL:
1734                 req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
1735                 break;
1736         case SDHCI_TRANSFER_MODE:
1737                 msm_host->transfer_mode = val;
1738                 break;
1739         case SDHCI_COMMAND:
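                /*
                 * Enable CDR only for read transfers that are not tuning
                 * commands; keep it disabled for everything else.
                 */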
1740                 if (!msm_host->use_cdr)
1741                         break;
1742                 if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
1743                     SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
1744                     SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
1745                         sdhci_msm_set_cdr(host, true);
1746                 else
1747                         sdhci_msm_set_cdr(host, false);
1748                 break;
1749         }
1750
1751         if (req_type) {
1752                 msm_host->pwr_irq_flag = 0;
1753                 /*
1754                  * Since this register write may trigger a power irq, ensure
1755                  * all previous register writes are complete by this point.
1756                  */
1757                 mb();
1758         }
1759         return req_type;
1760 }
1761
1762 /* This function may sleep */
1763 static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
1764 {
1765         u32 req_type = 0;
1766
1767         req_type = __sdhci_msm_check_write(host, val, reg);
1768         writew_relaxed(val, host->ioaddr + reg);
1769
1770         if (req_type)
1771                 sdhci_msm_check_power_status(host, req_type);
1772 }
1773
1774 /* This function may sleep */
1775 static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
1776 {
1777         u32 req_type = 0;
1778
1779         req_type = __sdhci_msm_check_write(host, val, reg);
1780
1781         writeb_relaxed(val, host->ioaddr + reg);
1782
1783         if (req_type)
1784                 sdhci_msm_check_power_status(host, req_type);
1785 }
1786
1787 static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
1788 {
1789         struct mmc_host *mmc = msm_host->mmc;
1790         struct regulator *supply = mmc->supply.vqmmc;
1791         u32 caps = 0, config;
1792         struct sdhci_host *host = mmc_priv(mmc);
1793         const struct sdhci_msm_offset *msm_offset = msm_host->offset;
1794
1795         if (!IS_ERR(mmc->supply.vqmmc)) {
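                /* 1.7-1.95 V covers nominal 1.8 V; 2.7-3.6 V covers 3.0/3.3 V. */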
1796                 if (regulator_is_supported_voltage(supply, 1700000, 1950000))
1797                         caps |= CORE_1_8V_SUPPORT;
1798                 if (regulator_is_supported_voltage(supply, 2700000, 3600000))
1799                         caps |= CORE_3_0V_SUPPORT;
1800
1801                 if (!caps)
1802                         pr_warn("%s: 1.8/3V not supported for vqmmc\n",
1803                                         mmc_hostname(mmc));
1804         }
1805
1806         if (caps) {
1807                 /*
1808                  * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
1809                  * bit can be used as required later on.
1810                  */
1811                 u32 io_level = msm_host->curr_io_level;
1812
1813                 config = readl_relaxed(host->ioaddr +
1814                                 msm_offset->core_vendor_spec);
1815                 config |= CORE_IO_PAD_PWR_SWITCH_EN;
1816
1817                 if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
1818                         config &= ~CORE_IO_PAD_PWR_SWITCH;
1819                 else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
1820                         config |= CORE_IO_PAD_PWR_SWITCH;
1821
1822                 writel_relaxed(config,
1823                                 host->ioaddr + msm_offset->core_vendor_spec);
1824         }
1825         msm_host->caps_0 |= caps;
1826         pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
1827 }
1828
1829 static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
1830 {
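        /*
         * A full SDHCI reset also resets the CQE block, so deactivate cqhci
         * first to keep its software state in sync with the hardware.
         */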
1831         if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
1832                 cqhci_deactivate(host->mmc);
1833         sdhci_reset(host, mask);
1834 }
1835
1836 static const struct sdhci_msm_variant_ops mci_var_ops = {
1837         .msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
1838         .msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
1839 };
1840
1841 static const struct sdhci_msm_variant_ops v5_var_ops = {
1842         .msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
1843         .msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
1844 };
1845
1846 static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
1847         .var_ops = &mci_var_ops,
1848         .offset = &sdhci_msm_mci_offset,
1849 };
1850
1851 static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
1852         .mci_removed = true,
1853         .var_ops = &v5_var_ops,
1854         .offset = &sdhci_msm_v5_offset,
1855 };
1856
1857 static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
1858         .mci_removed = true,
1859         .restore_dll_config = true,
1860         .var_ops = &v5_var_ops,
1861         .offset = &sdhci_msm_v5_offset,
1862 };
1863
1864 static const struct of_device_id sdhci_msm_dt_match[] = {
1865         {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
1866         {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
1867         {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
1868         {},
1869 };
1870
1871 MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
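/*
 * Illustrative (hypothetical) DT fragment matched by the table above:
 *
 *     sdhc_2: sdhci@8804000 {
 *             compatible = "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5";
 *             ...
 *     };
 */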
1872
1873 static const struct sdhci_ops sdhci_msm_ops = {
1874         .reset = sdhci_msm_reset,
1875         .set_clock = sdhci_msm_set_clock,
1876         .get_min_clock = sdhci_msm_get_min_clock,
1877         .get_max_clock = sdhci_msm_get_max_clock,
1878         .set_bus_width = sdhci_set_bus_width,
1879         .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
1880         .write_w = sdhci_msm_writew,
1881         .write_b = sdhci_msm_writeb,
1882         .irq    = sdhci_msm_cqe_irq,
1883 };
1884
1885 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
1886         .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1887                   SDHCI_QUIRK_SINGLE_POWER_WRITE |
1888                   SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
1889                   SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1890
1891         .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1892         .ops = &sdhci_msm_ops,
1893 };
1894
1895 static int sdhci_msm_probe(struct platform_device *pdev)
1896 {
1897         struct sdhci_host *host;
1898         struct sdhci_pltfm_host *pltfm_host;
1899         struct sdhci_msm_host *msm_host;
1900         struct clk *clk;
1901         int ret;
1902         u16 host_version, core_minor;
1903         u32 core_version, config;
1904         u8 core_major;
1905         const struct sdhci_msm_offset *msm_offset;
1906         const struct sdhci_msm_variant_info *var_info;
1907         struct device_node *node = pdev->dev.of_node;
1908
1909         host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
1910         if (IS_ERR(host))
1911                 return PTR_ERR(host);
1912
1913         host->sdma_boundary = 0;
1914         pltfm_host = sdhci_priv(host);
1915         msm_host = sdhci_pltfm_priv(pltfm_host);
1916         msm_host->mmc = host->mmc;
1917         msm_host->pdev = pdev;
1918
1919         ret = mmc_of_parse(host->mmc);
1920         if (ret)
1921                 goto pltfm_free;
1922
1923         /*
1924          * Based on the compatible string, load the required msm host info from
1925          * the data associated with the version info.
1926          */
1927         var_info = of_device_get_match_data(&pdev->dev);
1928
1929         msm_host->mci_removed = var_info->mci_removed;
1930         msm_host->restore_dll_config = var_info->restore_dll_config;
1931         msm_host->var_ops = var_info->var_ops;
1932         msm_host->offset = var_info->offset;
1933
1934         msm_offset = msm_host->offset;
1935
1936         sdhci_get_of_property(pdev);
1937
1938         msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
1939
1940         /* Setup SDCC bus voter clock. */
1941         msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
1942         if (!IS_ERR(msm_host->bus_clk)) {
1943                 /* Vote for max. clk rate for max. performance */
1944                 ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
1945                 if (ret)
1946                         goto pltfm_free;
1947                 ret = clk_prepare_enable(msm_host->bus_clk);
1948                 if (ret)
1949                         goto pltfm_free;
1950         }
1951
1952         /* Setup main peripheral bus clock */
1953         clk = devm_clk_get(&pdev->dev, "iface");
1954         if (IS_ERR(clk)) {
1955                 ret = PTR_ERR(clk);
1956                 dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
1957                 goto bus_clk_disable;
1958         }
1959         msm_host->bulk_clks[1].clk = clk;
1960
1961         /* Setup SDC MMC clock */
1962         clk = devm_clk_get(&pdev->dev, "core");
1963         if (IS_ERR(clk)) {
1964                 ret = PTR_ERR(clk);
1965                 dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
1966                 goto bus_clk_disable;
1967         }
1968         msm_host->bulk_clks[0].clk = clk;
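        /*
         * Note: the core clock must stay at index 0 of bulk_clks;
         * sdhci_msm_get_max_clock() relies on that ordering.
         */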
1969
1970         msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
1971         if (IS_ERR(msm_host->opp_table)) {
1972                 ret = PTR_ERR(msm_host->opp_table);
1973                 goto bus_clk_disable;
1974         }
1975
1976         /* OPP table is optional */
1977         ret = dev_pm_opp_of_add_table(&pdev->dev);
1978         if (!ret) {
1979                 msm_host->has_opp_table = true;
1980         } else if (ret != -ENODEV) {
1981                 dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
1982                 goto opp_cleanup;
1983         }
1984
1985         /* Vote for maximum clock rate for maximum performance */
1986         ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
1987         if (ret)
1988                 dev_warn(&pdev->dev, "core clock boost failed\n");
1989
1990         clk = devm_clk_get(&pdev->dev, "cal");
1991         if (IS_ERR(clk))
1992                 clk = NULL;
1993         msm_host->bulk_clks[2].clk = clk;
1994
1995         clk = devm_clk_get(&pdev->dev, "sleep");
1996         if (IS_ERR(clk))
1997                 clk = NULL;
1998         msm_host->bulk_clks[3].clk = clk;
1999
2000         ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
2001                                       msm_host->bulk_clks);
2002         if (ret)
2003                 goto opp_cleanup;
2004
2005         /*
2006          * The xo clock is needed for the FLL feature of cm_dll.
2007          * If the xo clock is not specified in DT, warn and proceed.
2008          */
2009         msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
2010         if (IS_ERR(msm_host->xo_clk)) {
2011                 ret = PTR_ERR(msm_host->xo_clk);
2012                 dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
2013         }
2014
2015         if (!msm_host->mci_removed) {
2016                 msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
2017                 if (IS_ERR(msm_host->core_mem)) {
2018                         ret = PTR_ERR(msm_host->core_mem);
2019                         goto clk_disable;
2020                 }
2021         }
2022
2023         /* Reset the vendor spec register to power on reset state */
2024         writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
2025                         host->ioaddr + msm_offset->core_vendor_spec);
2026
2027         if (!msm_host->mci_removed) {
2028                 /* Set HC_MODE_EN bit in HC_MODE register */
2029                 msm_host_writel(msm_host, HC_MODE_EN, host,
2030                                 msm_offset->core_hc_mode);
2031                 config = msm_host_readl(msm_host, host,
2032                                 msm_offset->core_hc_mode);
2033                 config |= FF_CLK_SW_RST_DIS;
2034                 msm_host_writel(msm_host, config, host,
2035                                 msm_offset->core_hc_mode);
2036         }
2037
2038         host_version = readw_relaxed(host->ioaddr + SDHCI_HOST_VERSION);
2039         dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
2040                 host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
2041                                SDHCI_VENDOR_VER_SHIFT));
2042
2043         core_version = msm_host_readl(msm_host, host,
2044                         msm_offset->core_mci_version);
2045         core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
2046                       CORE_VERSION_MAJOR_SHIFT;
2047         core_minor = core_version & CORE_VERSION_MINOR_MASK;
2048         dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
2049                 core_version, core_major, core_minor);
2050
2051         if (core_major == 1 && core_minor >= 0x42)
2052                 msm_host->use_14lpp_dll_reset = true;
2053
2054         /*
2055          * SDCC 5 controllers with major version 1, minor version 0x34 or later,
2056          * with HS400 mode support use the CM DLL instead of the CDC LP 533 DLL.
2057          */
2058         if (core_major == 1 && core_minor < 0x34)
2059                 msm_host->use_cdclp533 = true;
2060
2061         /*
2062          * Support for some capabilities is not advertised by newer
2063          * controller versions and must be explicitly enabled.
2064          */
2065         if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
2066                 config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
2067                 config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
2068                 writel_relaxed(config, host->ioaddr +
2069                                 msm_offset->core_vendor_spec_capabilities0);
2070         }
2071
2072         if (core_major == 1 && core_minor >= 0x49)
2073                 msm_host->updated_ddr_cfg = true;
2074
2075         /*
2076          * The power-on reset state may trigger a power irq if the previous
2077          * status of PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling
2078          * the pwr irq interrupt in the GIC, any pending power irq interrupt
2079          * should be acknowledged. Otherwise the power irq interrupt handler
2080          * would fire prematurely.
2081          */
2082         sdhci_msm_handle_pwr_irq(host, 0);
2083
2084         /*
2085          * Ensure that the above writes are propagated before interrupt enablement
2086          * in GIC.
2087          */
2088         mb();
2089
2090         /* Setup IRQ for handling power/voltage tasks with PMIC */
2091         msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
2092         if (msm_host->pwr_irq < 0) {
2093                 ret = msm_host->pwr_irq;
2094                 goto clk_disable;
2095         }
2096
2097         sdhci_msm_init_pwr_irq_wait(msm_host);
2098         /* Enable pwr irq interrupts */
2099         msm_host_writel(msm_host, INT_MASK, host,
2100                 msm_offset->core_pwrctl_mask);
2101
2102         ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
2103                                         sdhci_msm_pwr_irq, IRQF_ONESHOT,
2104                                         dev_name(&pdev->dev), host);
2105         if (ret) {
2106                 dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
2107                 goto clk_disable;
2108         }
2109
2110         msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
2111
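        /*
         * Hold a usage count across the rest of probe:
         * pm_runtime_get_noresume() here is balanced by the
         * pm_runtime_put_autosuspend() once the host has been added.
         */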
2112         pm_runtime_get_noresume(&pdev->dev);
2113         pm_runtime_set_active(&pdev->dev);
2114         pm_runtime_enable(&pdev->dev);
2115         pm_runtime_set_autosuspend_delay(&pdev->dev,
2116                                          MSM_MMC_AUTOSUSPEND_DELAY_MS);
2117         pm_runtime_use_autosuspend(&pdev->dev);
2118
2119         host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
2120         if (of_property_read_bool(node, "supports-cqe"))
2121                 ret = sdhci_msm_cqe_add_host(host, pdev);
2122         else
2123                 ret = sdhci_add_host(host);
2124         if (ret)
2125                 goto pm_runtime_disable;
2126         sdhci_msm_set_regulator_caps(msm_host);
2127
2128         pm_runtime_mark_last_busy(&pdev->dev);
2129         pm_runtime_put_autosuspend(&pdev->dev);
2130
2131         return 0;
2132
2133 pm_runtime_disable:
2134         pm_runtime_disable(&pdev->dev);
2135         pm_runtime_set_suspended(&pdev->dev);
2136         pm_runtime_put_noidle(&pdev->dev);
2137 clk_disable:
2138         clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
2139                                    msm_host->bulk_clks);
2140 opp_cleanup:
2141         if (msm_host->has_opp_table)
2142                 dev_pm_opp_of_remove_table(&pdev->dev);
2143         dev_pm_opp_put_clkname(msm_host->opp_table);
2144 bus_clk_disable:
2145         if (!IS_ERR(msm_host->bus_clk))
2146                 clk_disable_unprepare(msm_host->bus_clk);
2147 pltfm_free:
2148         sdhci_pltfm_free(pdev);
2149         return ret;
2150 }
2151
2152 static int sdhci_msm_remove(struct platform_device *pdev)
2153 {
2154         struct sdhci_host *host = platform_get_drvdata(pdev);
2155         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2156         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2157         int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
2158                     0xffffffff);
2159
2160         sdhci_remove_host(host, dead);
2161
2162         if (msm_host->has_opp_table)
2163                 dev_pm_opp_of_remove_table(&pdev->dev);
2164         dev_pm_opp_put_clkname(msm_host->opp_table);
2165         pm_runtime_get_sync(&pdev->dev);
2166         pm_runtime_disable(&pdev->dev);
2167         pm_runtime_put_noidle(&pdev->dev);
2168
2169         clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
2170                                    msm_host->bulk_clks);
2171         if (!IS_ERR(msm_host->bus_clk))
2172                 clk_disable_unprepare(msm_host->bus_clk);
2173         sdhci_pltfm_free(pdev);
2174         return 0;
2175 }
2176
2177 static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
2178 {
2179         struct sdhci_host *host = dev_get_drvdata(dev);
2180         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2181         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2182
2183         /* Drop the performance vote */
2184         dev_pm_opp_set_rate(dev, 0);
2185         clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
2186                                    msm_host->bulk_clks);
2187
2188         return 0;
2189 }
2190
2191 static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
2192 {
2193         struct sdhci_host *host = dev_get_drvdata(dev);
2194         struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2195         struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2196         int ret;
2197
2198         ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
2199                                        msm_host->bulk_clks);
2200         if (ret)
2201                 return ret;
2202         /*
2203          * Whenever the core clock is gated dynamically, the SDR DLL
2204          * settings need to be restored when the clock is ungated.
2205          */
2206         if (msm_host->restore_dll_config && msm_host->clk_rate)
2207                 ret = sdhci_msm_restore_sdr_dll_config(host);
2208
2209         dev_pm_opp_set_rate(dev, msm_host->clk_rate);
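        /* Restore the performance vote dropped in runtime_suspend. */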
2210
2211         return ret;
2212 }
2213
2214 static const struct dev_pm_ops sdhci_msm_pm_ops = {
2215         SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2216                                 pm_runtime_force_resume)
2217         SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
2218                            sdhci_msm_runtime_resume,
2219                            NULL)
2220 };
2221
2222 static struct platform_driver sdhci_msm_driver = {
2223         .probe = sdhci_msm_probe,
2224         .remove = sdhci_msm_remove,
2225         .driver = {
2226                    .name = "sdhci_msm",
2227                    .of_match_table = sdhci_msm_dt_match,
2228                    .pm = &sdhci_msm_pm_ops,
2229         },
2230 };
2231
2232 module_platform_driver(sdhci_msm_driver);
2233
2234 MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
2235 MODULE_LICENSE("GPL v2");