1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/drivers/mmc/core/mmc.c
4  *
5  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
6  *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  */
9
10 #include <linux/err.h>
11 #include <linux/of.h>
12 #include <linux/slab.h>
13 #include <linux/stat.h>
14 #include <linux/pm_runtime.h>
15
16 #include <linux/mmc/host.h>
17 #include <linux/mmc/card.h>
18 #include <linux/mmc/mmc.h>
19
20 #include "core.h"
21 #include "card.h"
22 #include "host.h"
23 #include "bus.h"
24 #include "mmc_ops.h"
25 #include "quirks.h"
26 #include "sd_ops.h"
27 #include "pwrseq.h"
28
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
30 #define MIN_CACHE_EN_TIMEOUT_MS 1600
31 #define CACHE_FLUSH_TIMEOUT_MS 30000 /* 30s */
32
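/*
 * These tables decode the CSD TRAN_SPEED and TAAC fields in mmc_decode_csd()
 * below. For example, a TRAN_SPEED byte of 0x32 gives exponent e = 2 and
 * mantissa index m = 6, so csd->max_dtr = tran_exp[2] * tran_mant[6] =
 * 25000000 (25 MHz).
 */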
33 static const unsigned int tran_exp[] = {
34         10000,          100000,         1000000,        10000000,
35         0,              0,              0,              0
36 };
37
38 static const unsigned char tran_mant[] = {
39         0,      10,     12,     13,     15,     20,     25,     30,
40         35,     40,     45,     50,     55,     60,     70,     80,
41 };
42
43 static const unsigned int taac_exp[] = {
44         1,      10,     100,    1000,   10000,  100000, 1000000, 10000000,
45 };
46
47 static const unsigned int taac_mant[] = {
48         0,      10,     12,     13,     15,     20,     25,     30,
49         35,     40,     45,     50,     55,     60,     70,     80,
50 };
51
52 #define UNSTUFF_BITS(resp,start,size)                                   \
53         ({                                                              \
54                 const int __size = size;                                \
55                 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
56                 const int __off = 3 - ((start) / 32);                   \
57                 const int __shft = (start) & 31;                        \
58                 u32 __res;                                              \
59                                                                         \
60                 __res = resp[__off] >> __shft;                          \
61                 if (__size + __shft > 32)                               \
62                         __res |= resp[__off-1] << ((32 - __shft) % 32); \
63                 __res & __mask;                                         \
64         })
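
/*
 * UNSTUFF_BITS() extracts a bit field from the 128-bit R2 response, where
 * resp[0] holds bits [127:96] and resp[3] holds bits [31:0]. For example,
 * UNSTUFF_BITS(resp, 120, 8) gives __off = 0 and __shft = 24, i.e.
 * (resp[0] >> 24) & 0xff, the manufacturer ID (MID) field of the CID.
 */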
65
66 /*
67  * Given the decoded CSD structure, decode the raw CID to our CID structure.
68  */
69 static int mmc_decode_cid(struct mmc_card *card)
70 {
71         u32 *resp = card->raw_cid;
72
73         /*
74          * The selection of the format here is based upon published
75          * specs from SanDisk and from what people have reported.
76          */
77         switch (card->csd.mmca_vsn) {
78         case 0: /* MMC v1.0 - v1.2 */
79         case 1: /* MMC v1.4 */
80                 card->cid.manfid        = UNSTUFF_BITS(resp, 104, 24);
81                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
82                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
83                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
84                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
85                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
86                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
87                 card->cid.prod_name[6]  = UNSTUFF_BITS(resp, 48, 8);
88                 card->cid.hwrev         = UNSTUFF_BITS(resp, 44, 4);
89                 card->cid.fwrev         = UNSTUFF_BITS(resp, 40, 4);
90                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 24);
91                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
92                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
93                 break;
94
95         case 2: /* MMC v2.0 - v2.2 */
96         case 3: /* MMC v3.1 - v3.3 */
97         case 4: /* MMC v4 */
98                 card->cid.manfid        = UNSTUFF_BITS(resp, 120, 8);
99                 card->cid.oemid         = UNSTUFF_BITS(resp, 104, 16);
100                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
101                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
102                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
103                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
104                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
105                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
106                 card->cid.prv           = UNSTUFF_BITS(resp, 48, 8);
107                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 32);
108                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
109                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
110                 break;
111
112         default:
113                 pr_err("%s: card has unknown MMCA version %d\n",
114                         mmc_hostname(card->host), card->csd.mmca_vsn);
115                 return -EINVAL;
116         }
117
118         return 0;
119 }
120
121 static void mmc_set_erase_size(struct mmc_card *card)
122 {
123         if (card->ext_csd.erase_group_def & 1)
124                 card->erase_size = card->ext_csd.hc_erase_size;
125         else
126                 card->erase_size = card->csd.erase_size;
127
128         mmc_init_erase(card);
129 }
130
131 /*
132  * Given a 128-bit response, decode to our card CSD structure.
133  */
134 static int mmc_decode_csd(struct mmc_card *card)
135 {
136         struct mmc_csd *csd = &card->csd;
137         unsigned int e, m, a, b;
138         u32 *resp = card->raw_csd;
139
140         /*
141          * We only understand CSD structure v1.1 and v1.2.
142          * v1.2 has extra information in bits 15, 11 and 10.
143          * We also support eMMC v4.4 & v4.41.
144          */
145         csd->structure = UNSTUFF_BITS(resp, 126, 2);
146         if (csd->structure == 0) {
147                 pr_err("%s: unrecognised CSD structure version %d\n",
148                         mmc_hostname(card->host), csd->structure);
149                 return -EINVAL;
150         }
151
152         csd->mmca_vsn    = UNSTUFF_BITS(resp, 122, 4);
153         m = UNSTUFF_BITS(resp, 115, 4);
154         e = UNSTUFF_BITS(resp, 112, 3);
155         csd->taac_ns     = (taac_exp[e] * taac_mant[m] + 9) / 10;
156         csd->taac_clks   = UNSTUFF_BITS(resp, 104, 8) * 100;
157
158         m = UNSTUFF_BITS(resp, 99, 4);
159         e = UNSTUFF_BITS(resp, 96, 3);
160         csd->max_dtr      = tran_exp[e] * tran_mant[m];
161         csd->cmdclass     = UNSTUFF_BITS(resp, 84, 12);
162
163         e = UNSTUFF_BITS(resp, 47, 3);
164         m = UNSTUFF_BITS(resp, 62, 12);
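        /*
         * capacity = (C_SIZE + 1) * 2^(C_SIZE_MULT + 2), counted in
         * blocks of READ_BL_LEN bytes.
         */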
165         csd->capacity     = (1 + m) << (e + 2);
166
167         csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
168         csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
169         csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
170         csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
171         csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
172         csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
173         csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
174         csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
175
176         if (csd->write_blkbits >= 9) {
177                 a = UNSTUFF_BITS(resp, 42, 5);
178                 b = UNSTUFF_BITS(resp, 37, 5);
179                 csd->erase_size = (a + 1) * (b + 1);
180                 csd->erase_size <<= csd->write_blkbits - 9;
181         }
182
183         return 0;
184 }
185
186 static void mmc_select_card_type(struct mmc_card *card)
187 {
188         struct mmc_host *host = card->host;
189         u8 card_type = card->ext_csd.raw_card_type;
190         u32 caps = host->caps, caps2 = host->caps2;
191         unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
192         unsigned int avail_type = 0;
193
194         if (caps & MMC_CAP_MMC_HIGHSPEED &&
195             card_type & EXT_CSD_CARD_TYPE_HS_26) {
196                 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
197                 avail_type |= EXT_CSD_CARD_TYPE_HS_26;
198         }
199
200         if (caps & MMC_CAP_MMC_HIGHSPEED &&
201             card_type & EXT_CSD_CARD_TYPE_HS_52) {
202                 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
203                 avail_type |= EXT_CSD_CARD_TYPE_HS_52;
204         }
205
206         if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
207             card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
208                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
209                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
210         }
211
212         if (caps & MMC_CAP_1_2V_DDR &&
213             card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
214                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
215                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
216         }
217
218         if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
219             card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
220                 hs200_max_dtr = MMC_HS200_MAX_DTR;
221                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
222         }
223
224         if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
225             card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
226                 hs200_max_dtr = MMC_HS200_MAX_DTR;
227                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
228         }
229
230         if (caps2 & MMC_CAP2_HS400_1_8V &&
231             card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
232                 hs200_max_dtr = MMC_HS200_MAX_DTR;
233                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
234         }
235
236         if (caps2 & MMC_CAP2_HS400_1_2V &&
237             card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
238                 hs200_max_dtr = MMC_HS200_MAX_DTR;
239                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
240         }
241
242         if ((caps2 & MMC_CAP2_HS400_ES) &&
243             card->ext_csd.strobe_support &&
244             (avail_type & EXT_CSD_CARD_TYPE_HS400))
245                 avail_type |= EXT_CSD_CARD_TYPE_HS400ES;
246
247         card->ext_csd.hs_max_dtr = hs_max_dtr;
248         card->ext_csd.hs200_max_dtr = hs200_max_dtr;
249         card->mmc_avail_type = avail_type;
250 }
251
252 static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
253 {
254         u8 hc_erase_grp_sz, hc_wp_grp_sz;
255
256         /*
257          * Disable these attributes by default
258          */
259         card->ext_csd.enhanced_area_offset = -EINVAL;
260         card->ext_csd.enhanced_area_size = -EINVAL;
261
262         /*
263          * Enhanced area feature support -- check whether the eMMC
264          * card has the Enhanced area enabled.  If so, export the enhanced
265          * area offset and size to user space via the sysfs interface.
266          */
267         if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
268             (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
269                 if (card->ext_csd.partition_setting_completed) {
270                         hc_erase_grp_sz =
271                                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
272                         hc_wp_grp_sz =
273                                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
274
275                         /*
276                          * calculate the enhanced data area offset, in bytes
277                          */
278                         card->ext_csd.enhanced_area_offset =
279                                 (((unsigned long long)ext_csd[139]) << 24) +
280                                 (((unsigned long long)ext_csd[138]) << 16) +
281                                 (((unsigned long long)ext_csd[137]) << 8) +
282                                 (((unsigned long long)ext_csd[136]));
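                        /* ENH_START_ADDR is in sectors on block-addressed cards */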
283                         if (mmc_card_blockaddr(card))
284                                 card->ext_csd.enhanced_area_offset <<= 9;
285                         /*
286                          * calculate the enhanced data area size, in kilobytes
287                          */
288                         card->ext_csd.enhanced_area_size =
289                                 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
290                                 ext_csd[140];
291                         card->ext_csd.enhanced_area_size *=
292                                 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
293                         card->ext_csd.enhanced_area_size <<= 9;
294                 } else {
295                         pr_warn("%s: defines enhanced area without partition setting complete\n",
296                                 mmc_hostname(card->host));
297                 }
298         }
299 }
300
301 static void mmc_part_add(struct mmc_card *card, u64 size,
302                          unsigned int part_cfg, char *name, int idx, bool ro,
303                          int area_type)
304 {
305         card->part[card->nr_parts].size = size;
306         card->part[card->nr_parts].part_cfg = part_cfg;
307         sprintf(card->part[card->nr_parts].name, name, idx);
308         card->part[card->nr_parts].force_ro = ro;
309         card->part[card->nr_parts].area_type = area_type;
310         card->nr_parts++;
311 }
312
313 static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
314 {
315         int idx;
316         u8 hc_erase_grp_sz, hc_wp_grp_sz;
317         u64 part_size;
318
319         /*
320          * General purpose partition feature support --
321          * If ext_csd has the size of general purpose partitions,
322          * set size, part_cfg, partition name in mmc_part.
323          */
324         if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
325             EXT_CSD_PART_SUPPORT_PART_EN) {
326                 hc_erase_grp_sz =
327                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
328                 hc_wp_grp_sz =
329                         ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
330
331                 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
332                         if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
333                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
334                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
335                                 continue;
336                         if (card->ext_csd.partition_setting_completed == 0) {
337                                 pr_warn("%s: has partition size defined without partition complete\n",
338                                         mmc_hostname(card->host));
339                                 break;
340                         }
341                         part_size =
342                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
343                                 << 16) +
344                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
345                                 << 8) +
346                                 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
347                         part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
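                        /* part_size is in 512 KiB units; << 19 converts it to bytes */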
348                         mmc_part_add(card, part_size << 19,
349                                 EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
350                                 "gp%d", idx, false,
351                                 MMC_BLK_DATA_AREA_GP);
352                 }
353         }
354 }
355
356 /* Minimum partition switch timeout in milliseconds */
357 #define MMC_MIN_PART_SWITCH_TIME        300
358
359 /*
360  * Decode extended CSD.
361  */
362 static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
363 {
364         int err = 0, idx;
365         u64 part_size;
366         struct device_node *np;
367         bool broken_hpi = false;
368
369         /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
370         card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
371         if (card->csd.structure == 3) {
372                 if (card->ext_csd.raw_ext_csd_structure > 2) {
373                         pr_err("%s: unrecognised EXT_CSD structure version %d\n",
374                                 mmc_hostname(card->host),
375                                 card->ext_csd.raw_ext_csd_structure);
376                         err = -EINVAL;
377                         goto out;
378                 }
379         }
380
381         np = mmc_of_find_child_device(card->host, 0);
382         if (np && of_device_is_compatible(np, "mmc-card"))
383                 broken_hpi = of_property_read_bool(np, "broken-hpi");
384         of_node_put(np);
385
386         /*
387          * The EXT_CSD format is meant to be forward compatible. As long
388          * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
389          * are authorized, see JEDEC JESD84-B50 section B.8.
390          */
391         card->ext_csd.rev = ext_csd[EXT_CSD_REV];
392
393         /* fixup device after ext_csd revision field is updated */
394         mmc_fixup_device(card, mmc_ext_csd_fixups);
395
396         card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
397         card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
398         card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
399         card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
400         if (card->ext_csd.rev >= 2) {
401                 card->ext_csd.sectors =
402                         ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
403                         ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
404                         ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
405                         ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
406
407                 /* Cards with density > 2GiB are sector addressed */
408                 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
409                         mmc_card_set_blockaddr(card);
410         }
411
412         card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
413         card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
414         mmc_select_card_type(card);
415
416         card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
417         card->ext_csd.raw_erase_timeout_mult =
418                 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
419         card->ext_csd.raw_hc_erase_grp_size =
420                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
421         if (card->ext_csd.rev >= 3) {
422                 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
423                 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
424
425                 /* EXT_CSD value is in units of 10ms, but we store in ms */
426                 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
427
428                 /* Sleep / awake timeout in 100ns units */
429                 if (sa_shift > 0 && sa_shift <= 0x17)
430                         card->ext_csd.sa_timeout =
431                                         1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
432                 card->ext_csd.erase_group_def =
433                         ext_csd[EXT_CSD_ERASE_GROUP_DEF];
434                 card->ext_csd.hc_erase_timeout = 300 *
435                         ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
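                /* HC_ERASE_GRP_SIZE is in 512 KiB units; store as 512-byte sectors */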
436                 card->ext_csd.hc_erase_size =
437                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
438
439                 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
440
441                 /*
442                  * There are two boot regions of equal size, defined in
443                  * multiples of 128K.
444                  */
445                 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
446                         for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
447                                 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
448                                 mmc_part_add(card, part_size,
449                                         EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
450                                         "boot%d", idx, true,
451                                         MMC_BLK_DATA_AREA_BOOT);
452                         }
453                 }
454         }
455
456         card->ext_csd.raw_hc_erase_gap_size =
457                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
458         card->ext_csd.raw_sec_trim_mult =
459                 ext_csd[EXT_CSD_SEC_TRIM_MULT];
460         card->ext_csd.raw_sec_erase_mult =
461                 ext_csd[EXT_CSD_SEC_ERASE_MULT];
462         card->ext_csd.raw_sec_feature_support =
463                 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
464         card->ext_csd.raw_trim_mult =
465                 ext_csd[EXT_CSD_TRIM_MULT];
466         card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
467         card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
468         if (card->ext_csd.rev >= 4) {
469                 if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
470                     EXT_CSD_PART_SETTING_COMPLETED)
471                         card->ext_csd.partition_setting_completed = 1;
472                 else
473                         card->ext_csd.partition_setting_completed = 0;
474
475                 mmc_manage_enhanced_area(card, ext_csd);
476
477                 mmc_manage_gp_partitions(card, ext_csd);
478
479                 card->ext_csd.sec_trim_mult =
480                         ext_csd[EXT_CSD_SEC_TRIM_MULT];
481                 card->ext_csd.sec_erase_mult =
482                         ext_csd[EXT_CSD_SEC_ERASE_MULT];
483                 card->ext_csd.sec_feature_support =
484                         ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
485                 card->ext_csd.trim_timeout = 300 *
486                         ext_csd[EXT_CSD_TRIM_MULT];
487
488                 /*
489                  * Note that the call to mmc_part_add above defaults to read
490                  * only. If this default assumption is changed, the call must
491                  * take into account the value of boot_locked below.
492                  */
493                 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
494                 card->ext_csd.boot_ro_lockable = true;
495
496                 /* Save power class values */
497                 card->ext_csd.raw_pwr_cl_52_195 =
498                         ext_csd[EXT_CSD_PWR_CL_52_195];
499                 card->ext_csd.raw_pwr_cl_26_195 =
500                         ext_csd[EXT_CSD_PWR_CL_26_195];
501                 card->ext_csd.raw_pwr_cl_52_360 =
502                         ext_csd[EXT_CSD_PWR_CL_52_360];
503                 card->ext_csd.raw_pwr_cl_26_360 =
504                         ext_csd[EXT_CSD_PWR_CL_26_360];
505                 card->ext_csd.raw_pwr_cl_200_195 =
506                         ext_csd[EXT_CSD_PWR_CL_200_195];
507                 card->ext_csd.raw_pwr_cl_200_360 =
508                         ext_csd[EXT_CSD_PWR_CL_200_360];
509                 card->ext_csd.raw_pwr_cl_ddr_52_195 =
510                         ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
511                 card->ext_csd.raw_pwr_cl_ddr_52_360 =
512                         ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
513                 card->ext_csd.raw_pwr_cl_ddr_200_360 =
514                         ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
515         }
516
517         if (card->ext_csd.rev >= 5) {
518                 /* Adjust production date as per JEDEC JESD84-B451 */
519                 if (card->cid.year < 2010)
520                         card->cid.year += 16;
521
522                 /* check whether the eMMC card supports BKOPS */
523                 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
524                         card->ext_csd.bkops = 1;
525                         card->ext_csd.man_bkops_en =
526                                         (ext_csd[EXT_CSD_BKOPS_EN] &
527                                                 EXT_CSD_MANUAL_BKOPS_MASK);
528                         card->ext_csd.raw_bkops_status =
529                                 ext_csd[EXT_CSD_BKOPS_STATUS];
530                         if (card->ext_csd.man_bkops_en)
531                                 pr_debug("%s: MAN_BKOPS_EN bit is set\n",
532                                         mmc_hostname(card->host));
533                         card->ext_csd.auto_bkops_en =
534                                         (ext_csd[EXT_CSD_BKOPS_EN] &
535                                                 EXT_CSD_AUTO_BKOPS_MASK);
536                         if (card->ext_csd.auto_bkops_en)
537                                 pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
538                                         mmc_hostname(card->host));
539                 }
540
541                 /* check whether the eMMC card supports HPI */
542                 if (!mmc_card_broken_hpi(card) &&
543                     !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
544                         card->ext_csd.hpi = 1;
545                         if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
546                                 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
547                         else
548                                 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
549                         /*
550                          * Indicate the maximum timeout to close
551                          * a command interrupted by HPI
552                          */
553                         card->ext_csd.out_of_int_time =
554                                 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
555                 }
556
557                 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
558                 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
559
560                 /*
561                  * RPMB regions are defined in multiples of 128K.
562                  */
563                 card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
564                 if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
565                         mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
566                                 EXT_CSD_PART_CONFIG_ACC_RPMB,
567                                 "rpmb", 0, false,
568                                 MMC_BLK_DATA_AREA_RPMB);
569                 }
570         }
571
572         card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
573         if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
574                 card->erased_byte = 0xFF;
575         else
576                 card->erased_byte = 0x0;
577
578         card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
579         /* eMMC v4.5 or later */
580         if (card->ext_csd.rev >= 6) {
581                 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
582
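                /* GENERIC_CMD6_TIME and POWER_OFF_LONG_TIME are in 10ms units */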
583                 card->ext_csd.generic_cmd6_time = 10 *
584                         ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
585                 card->ext_csd.power_off_longtime = 10 *
586                         ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
587
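                /* CACHE_SIZE is reported by the device in KiB */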
588                 card->ext_csd.cache_size =
589                         ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
590                         ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
591                         ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
592                         ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
593
594                 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
595                         card->ext_csd.data_sector_size = 4096;
596                 else
597                         card->ext_csd.data_sector_size = 512;
598
599                 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
600                     (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
601                         card->ext_csd.data_tag_unit_size =
602                         ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
603                         (card->ext_csd.data_sector_size);
604                 } else {
605                         card->ext_csd.data_tag_unit_size = 0;
606                 }
607
608                 card->ext_csd.max_packed_writes =
609                         ext_csd[EXT_CSD_MAX_PACKED_WRITES];
610                 card->ext_csd.max_packed_reads =
611                         ext_csd[EXT_CSD_MAX_PACKED_READS];
612         } else {
613                 card->ext_csd.data_sector_size = 512;
614         }
615
616         /*
617          * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
618          * when accessing a specific field", so use it here if there is no
619          * PARTITION_SWITCH_TIME.
620          */
621         if (!card->ext_csd.part_time)
622                 card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
623         /* Some eMMCs set the value too low, so enforce a minimum */
624         if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
625                 card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
626
627         /* eMMC v5 or later */
628         if (card->ext_csd.rev >= 7) {
629                 memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
630                        MMC_FIRMWARE_LEN);
631                 card->ext_csd.ffu_capable =
632                         (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
633                         !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
634
635                 card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
636                 card->ext_csd.device_life_time_est_typ_a =
637                         ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
638                 card->ext_csd.device_life_time_est_typ_b =
639                         ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
640         }
641
642         /* eMMC v5.1 or later */
643         if (card->ext_csd.rev >= 8) {
644                 card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
645                                              EXT_CSD_CMDQ_SUPPORTED;
646                 card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
647                                             EXT_CSD_CMDQ_DEPTH_MASK) + 1;
648                 /* Exclude inefficiently small queue depths */
649                 if (card->ext_csd.cmdq_depth <= 2) {
650                         card->ext_csd.cmdq_support = false;
651                         card->ext_csd.cmdq_depth = 0;
652                 }
653                 if (card->ext_csd.cmdq_support) {
654                         pr_debug("%s: Command Queue supported depth %u\n",
655                                  mmc_hostname(card->host),
656                                  card->ext_csd.cmdq_depth);
657                 }
658                 card->ext_csd.enhanced_rpmb_supported =
659                                         (card->ext_csd.rel_param &
660                                          EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
661         }
662 out:
663         return err;
664 }
665
666 static int mmc_read_ext_csd(struct mmc_card *card)
667 {
668         u8 *ext_csd;
669         int err;
670
671         if (!mmc_can_ext_csd(card))
672                 return 0;
673
674         err = mmc_get_ext_csd(card, &ext_csd);
675         if (err) {
676                 /* If the host or card can't do the switch, fail gracefully. */
678                 if ((err != -EINVAL)
679                  && (err != -ENOSYS)
680                  && (err != -EFAULT))
681                         return err;
682
683                 /*
684                  * High capacity cards should have this "magic" size
685                  * stored in their CSD.
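                 * The value corresponds to C_SIZE = 0xFFF and C_SIZE_MULT = 0x7,
                 * which devices larger than 2GB report to indicate that the real
                 * capacity must be read from EXT_CSD SEC_COUNT.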
686                  */
687                 if (card->csd.capacity == (4096 * 512)) {
688                         pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
689                                 mmc_hostname(card->host));
690                 } else {
691                         pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
692                                 mmc_hostname(card->host));
693                         err = 0;
694                 }
695
696                 return err;
697         }
698
699         err = mmc_decode_ext_csd(card, ext_csd);
700         kfree(ext_csd);
701         return err;
702 }
703
704 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
705 {
706         u8 *bw_ext_csd;
707         int err;
708
709         if (bus_width == MMC_BUS_WIDTH_1)
710                 return 0;
711
712         err = mmc_get_ext_csd(card, &bw_ext_csd);
713         if (err)
714                 return err;
715
716         /* only compare read only fields */
717         err = !((card->ext_csd.raw_partition_support ==
718                         bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
719                 (card->ext_csd.raw_erased_mem_count ==
720                         bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
721                 (card->ext_csd.rev ==
722                         bw_ext_csd[EXT_CSD_REV]) &&
723                 (card->ext_csd.raw_ext_csd_structure ==
724                         bw_ext_csd[EXT_CSD_STRUCTURE]) &&
725                 (card->ext_csd.raw_card_type ==
726                         bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
727                 (card->ext_csd.raw_s_a_timeout ==
728                         bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
729                 (card->ext_csd.raw_hc_erase_gap_size ==
730                         bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
731                 (card->ext_csd.raw_erase_timeout_mult ==
732                         bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
733                 (card->ext_csd.raw_hc_erase_grp_size ==
734                         bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
735                 (card->ext_csd.raw_sec_trim_mult ==
736                         bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
737                 (card->ext_csd.raw_sec_erase_mult ==
738                         bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
739                 (card->ext_csd.raw_sec_feature_support ==
740                         bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
741                 (card->ext_csd.raw_trim_mult ==
742                         bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
743                 (card->ext_csd.raw_sectors[0] ==
744                         bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
745                 (card->ext_csd.raw_sectors[1] ==
746                         bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
747                 (card->ext_csd.raw_sectors[2] ==
748                         bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
749                 (card->ext_csd.raw_sectors[3] ==
750                         bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
751                 (card->ext_csd.raw_pwr_cl_52_195 ==
752                         bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
753                 (card->ext_csd.raw_pwr_cl_26_195 ==
754                         bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
755                 (card->ext_csd.raw_pwr_cl_52_360 ==
756                         bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
757                 (card->ext_csd.raw_pwr_cl_26_360 ==
758                         bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
759                 (card->ext_csd.raw_pwr_cl_200_195 ==
760                         bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
761                 (card->ext_csd.raw_pwr_cl_200_360 ==
762                         bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
763                 (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
764                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
765                 (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
766                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
767                 (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
768                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
769
770         if (err)
771                 err = -EINVAL;
772
773         kfree(bw_ext_csd);
774         return err;
775 }
776
777 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
778         card->raw_cid[2], card->raw_cid[3]);
779 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
780         card->raw_csd[2], card->raw_csd[3]);
781 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
782 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
783 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
784 MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
785 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
786 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
787 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
788 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
789 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
790 MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
791 MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
792 MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
793         card->ext_csd.device_life_time_est_typ_a,
794         card->ext_csd.device_life_time_est_typ_b);
795 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
796 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
797                 card->ext_csd.enhanced_area_offset);
798 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
799 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
800 MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
801         card->ext_csd.enhanced_rpmb_supported);
802 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
803 MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
804 MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
805 MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
806
807 static ssize_t mmc_fwrev_show(struct device *dev,
808                               struct device_attribute *attr,
809                               char *buf)
810 {
811         struct mmc_card *card = mmc_dev_to_card(dev);
812
813         if (card->ext_csd.rev < 7) {
814                 return sprintf(buf, "0x%x\n", card->cid.fwrev);
815         } else {
816                 return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
817                                card->ext_csd.fwrev);
818         }
819 }
820
821 static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
822
823 static ssize_t mmc_dsr_show(struct device *dev,
824                             struct device_attribute *attr,
825                             char *buf)
826 {
827         struct mmc_card *card = mmc_dev_to_card(dev);
828         struct mmc_host *host = card->host;
829
830         if (card->csd.dsr_imp && host->dsr_req)
831                 return sprintf(buf, "0x%x\n", host->dsr);
832         else
833                 /* return default DSR value */
834                 return sprintf(buf, "0x%x\n", 0x404);
835 }
836
837 static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
838
839 static struct attribute *mmc_std_attrs[] = {
840         &dev_attr_cid.attr,
841         &dev_attr_csd.attr,
842         &dev_attr_date.attr,
843         &dev_attr_erase_size.attr,
844         &dev_attr_preferred_erase_size.attr,
845         &dev_attr_fwrev.attr,
846         &dev_attr_ffu_capable.attr,
847         &dev_attr_hwrev.attr,
848         &dev_attr_manfid.attr,
849         &dev_attr_name.attr,
850         &dev_attr_oemid.attr,
851         &dev_attr_prv.attr,
852         &dev_attr_rev.attr,
853         &dev_attr_pre_eol_info.attr,
854         &dev_attr_life_time.attr,
855         &dev_attr_serial.attr,
856         &dev_attr_enhanced_area_offset.attr,
857         &dev_attr_enhanced_area_size.attr,
858         &dev_attr_raw_rpmb_size_mult.attr,
859         &dev_attr_enhanced_rpmb_supported.attr,
860         &dev_attr_rel_sectors.attr,
861         &dev_attr_ocr.attr,
862         &dev_attr_rca.attr,
863         &dev_attr_dsr.attr,
864         &dev_attr_cmdq_en.attr,
865         NULL,
866 };
867 ATTRIBUTE_GROUPS(mmc_std);
868
869 static struct device_type mmc_type = {
870         .groups = mmc_std_groups,
871 };
872
873 /*
874  * Select the power class for the current bus width.
875  * If a power class is defined for the 4/8-bit bus in the
876  * extended CSD register, select it by executing the
877  * mmc_switch command.
878  */
879 static int __mmc_select_powerclass(struct mmc_card *card,
880                                    unsigned int bus_width)
881 {
882         struct mmc_host *host = card->host;
883         struct mmc_ext_csd *ext_csd = &card->ext_csd;
884         unsigned int pwrclass_val = 0;
885         int err = 0;
886
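        /* host->ios.vdd is a bit index into the OCR voltage-range mask */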
887         switch (1 << host->ios.vdd) {
888         case MMC_VDD_165_195:
889                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
890                         pwrclass_val = ext_csd->raw_pwr_cl_26_195;
891                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
892                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
893                                 ext_csd->raw_pwr_cl_52_195 :
894                                 ext_csd->raw_pwr_cl_ddr_52_195;
895                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
896                         pwrclass_val = ext_csd->raw_pwr_cl_200_195;
897                 break;
898         case MMC_VDD_27_28:
899         case MMC_VDD_28_29:
900         case MMC_VDD_29_30:
901         case MMC_VDD_30_31:
902         case MMC_VDD_31_32:
903         case MMC_VDD_32_33:
904         case MMC_VDD_33_34:
905         case MMC_VDD_34_35:
906         case MMC_VDD_35_36:
907                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
908                         pwrclass_val = ext_csd->raw_pwr_cl_26_360;
909                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
910                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
911                                 ext_csd->raw_pwr_cl_52_360 :
912                                 ext_csd->raw_pwr_cl_ddr_52_360;
913                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
914                         pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
915                                 ext_csd->raw_pwr_cl_ddr_200_360 :
916                                 ext_csd->raw_pwr_cl_200_360;
917                 break;
918         default:
919                 pr_warn("%s: Voltage range not supported for power class\n",
920                         mmc_hostname(host));
921                 return -EINVAL;
922         }
923
924         if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
925                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
926                                 EXT_CSD_PWR_CL_8BIT_SHIFT;
927         else
928                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
929                                 EXT_CSD_PWR_CL_4BIT_SHIFT;
930
931         /* If the power class is different from the default value */
932         if (pwrclass_val > 0) {
933                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
934                                  EXT_CSD_POWER_CLASS,
935                                  pwrclass_val,
936                                  card->ext_csd.generic_cmd6_time);
937         }
938
939         return err;
940 }
941
942 static int mmc_select_powerclass(struct mmc_card *card)
943 {
944         struct mmc_host *host = card->host;
945         u32 bus_width, ext_csd_bits;
946         int err, ddr;
947
948         /* Power class selection is supported for versions >= 4.0 */
949         if (!mmc_can_ext_csd(card))
950                 return 0;
951
952         bus_width = host->ios.bus_width;
953         /* Power class values are defined only for 4/8 bit bus */
954         if (bus_width == MMC_BUS_WIDTH_1)
955                 return 0;
956
957         ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
958         if (ddr)
959                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
960                         EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
961         else
962                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
963                         EXT_CSD_BUS_WIDTH_8 :  EXT_CSD_BUS_WIDTH_4;
964
965         err = __mmc_select_powerclass(card, ext_csd_bits);
966         if (err)
967                 pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
968                         mmc_hostname(host), 1 << bus_width, ddr);
969
970         return err;
971 }
972
973 /*
974  * Set the bus speed for the selected speed mode.
975  */
976 static void mmc_set_bus_speed(struct mmc_card *card)
977 {
978         unsigned int max_dtr = (unsigned int)-1;
979
980         if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
981              max_dtr > card->ext_csd.hs200_max_dtr)
982                 max_dtr = card->ext_csd.hs200_max_dtr;
983         else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
984                 max_dtr = card->ext_csd.hs_max_dtr;
985         else if (max_dtr > card->csd.max_dtr)
986                 max_dtr = card->csd.max_dtr;
987
988         mmc_set_clock(card->host, max_dtr);
989 }
990
991 /*
992  * Select the bus width among 4-bit and 8-bit (SDR).
993  * If the bus width is changed successfully, return the selected width value.
994  * Zero is returned instead of an error value if the wide width is not supported.
995  */
996 static int mmc_select_bus_width(struct mmc_card *card)
997 {
998         static unsigned ext_csd_bits[] = {
999                 EXT_CSD_BUS_WIDTH_8,
1000                 EXT_CSD_BUS_WIDTH_4,
1001         };
1002         static unsigned bus_widths[] = {
1003                 MMC_BUS_WIDTH_8,
1004                 MMC_BUS_WIDTH_4,
1005         };
1006         struct mmc_host *host = card->host;
1007         unsigned idx, bus_width = 0;
1008         int err = 0;
1009
1010         if (!mmc_can_ext_csd(card) ||
1011             !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
1012                 return 0;
1013
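        /* Try 8-bit first if the host supports it, otherwise start at 4-bit */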
1014         idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
1015
1016         /*
1017          * Unlike SD, MMC cards don't have a configuration register reporting
1018          * the supported bus width, so either the bus test command should be
1019          * run to identify the supported width, or the ext_csd values at the
1020          * current width compared against the ext_csd read earlier in 1-bit mode.
1021          */
1022         for (; idx < ARRAY_SIZE(bus_widths); idx++) {
1023                 /*
1024                  * If the host is capable of 8-bit transfer, switch the
1025                  * device to 8-bit transfer mode first. If the mmc switch
1026                  * command returns an error, fall back to 4-bit transfer
1027                  * mode. On success, set the corresponding bus width on
1028                  * the host.
1029                  */
1030                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1031                                  EXT_CSD_BUS_WIDTH,
1032                                  ext_csd_bits[idx],
1033                                  card->ext_csd.generic_cmd6_time);
1034                 if (err)
1035                         continue;
1036
1037                 bus_width = bus_widths[idx];
1038                 mmc_set_bus_width(host, bus_width);
1039
1040                 /*
1041                  * If the controller can't handle the bus width test,
1042                  * compare the ext_csd previously read in 1-bit mode
1043                  * against the ext_csd at the new bus width.
1044                  */
1045                 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
1046                         err = mmc_compare_ext_csds(card, bus_width);
1047                 else
1048                         err = mmc_bus_test(card, bus_width);
1049
1050                 if (!err) {
1051                         err = bus_width;
1052                         break;
1053                 } else {
1054                         pr_warn("%s: switch to bus width %d failed\n",
1055                                 mmc_hostname(host), 1 << bus_width);
1056                 }
1057         }
1058
1059         return err;
1060 }
1061
1062 /*
1063  * Switch to the high-speed mode
1064  */
1065 static int mmc_select_hs(struct mmc_card *card)
1066 {
1067         int err;
1068
1069         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1070                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1071                            card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
1072                            true, true, MMC_CMD_RETRIES);
1073         if (err)
1074                 pr_warn("%s: switch to high-speed failed, err:%d\n",
1075                         mmc_hostname(card->host), err);
1076
1077         return err;
1078 }
1079
1080 /*
1081  * Activate wide bus and DDR if supported.
1082  */
1083 static int mmc_select_hs_ddr(struct mmc_card *card)
1084 {
1085         struct mmc_host *host = card->host;
1086         u32 bus_width, ext_csd_bits;
1087         int err = 0;
1088
1089         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
1090                 return 0;
1091
1092         bus_width = host->ios.bus_width;
1093         if (bus_width == MMC_BUS_WIDTH_1)
1094                 return 0;
1095
1096         ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
1097                 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
1098
1099         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1100                            EXT_CSD_BUS_WIDTH,
1101                            ext_csd_bits,
1102                            card->ext_csd.generic_cmd6_time,
1103                            MMC_TIMING_MMC_DDR52,
1104                            true, true, MMC_CMD_RETRIES);
1105         if (err) {
1106                 pr_err("%s: switch to bus width %d ddr failed\n",
1107                         mmc_hostname(host), 1 << bus_width);
1108                 return err;
1109         }
1110
1111         /*
1112          * eMMC cards can support 3.3V to 1.2V i/o (vccq)
1113          * signaling.
1114          *
1115          * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
1116          *
1117          * 1.8V vccq at 3.3V core voltage (vcc) is not required
1118          * in the JEDEC spec for DDR.
1119          *
1120          * Even though an (e)MMC card can support 3.3V to 1.2V vccq, not
1121          * all host controllers can support this, for example some SDHCI
1122          * controllers that connect to an eMMC device. Some of these host
1123          * controllers still need to use 1.8V vccq in order to support
1124          * DDR mode.
1125          *
1126          * So the sequence will be:
1127          * if (host and device can both support 1.2v IO)
1128          *      use 1.2v IO;
1129          * else if (host and device can both support 1.8v IO)
1130          *      use 1.8v IO;
1131          * so if host and device can only support 3.3v IO, this is the
1132          * last choice.
1133          *
1134          * WARNING: eMMC rules are NOT the same as SD DDR
1135          */
1136         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
1137                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1138                 if (!err)
1139                         return 0;
1140         }
1141
1142         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
1143             host->caps & MMC_CAP_1_8V_DDR)
1144                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1145
1146         /* make sure vccq is back at 3.3V if the voltage switch failed */
1147         if (err)
1148                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
1149
1150         return err;
1151 }
1152
1153 static int mmc_select_hs400(struct mmc_card *card)
1154 {
1155         struct mmc_host *host = card->host;
1156         unsigned int max_dtr;
1157         int err = 0;
1158         u8 val;
1159
1160         /*
1161          * HS400 mode requires 8-bit bus width
1162          */
1163         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1164               host->ios.bus_width == MMC_BUS_WIDTH_8))
1165                 return 0;
1166
1167         /* Switch card to HS mode */
1168         val = EXT_CSD_TIMING_HS;
1169         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1170                            EXT_CSD_HS_TIMING, val,
1171                            card->ext_csd.generic_cmd6_time, 0,
1172                            false, true, MMC_CMD_RETRIES);
1173         if (err) {
1174                 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1175                         mmc_hostname(host), err);
1176                 return err;
1177         }
1178
1179         /* Prepare host to downgrade to HS timing */
1180         if (host->ops->hs400_downgrade)
1181                 host->ops->hs400_downgrade(host);
1182
1183         /* Set host controller to HS timing */
1184         mmc_set_timing(host, MMC_TIMING_MMC_HS);
1185
1186         /* Reduce frequency to HS frequency */
1187         max_dtr = card->ext_csd.hs_max_dtr;
1188         mmc_set_clock(host, max_dtr);
1189
1190         err = mmc_switch_status(card, true);
1191         if (err)
1192                 goto out_err;
1193
1194         if (host->ops->hs400_prepare_ddr)
1195                 host->ops->hs400_prepare_ddr(host);
1196
1197         /* Switch card to DDR */
1198         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1199                          EXT_CSD_BUS_WIDTH,
1200                          EXT_CSD_DDR_BUS_WIDTH_8,
1201                          card->ext_csd.generic_cmd6_time);
1202         if (err) {
1203                 pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
1204                         mmc_hostname(host), err);
1205                 return err;
1206         }
1207
1208         /* Switch card to HS400 */
1209         val = EXT_CSD_TIMING_HS400 |
1210               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1211         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1212                            EXT_CSD_HS_TIMING, val,
1213                            card->ext_csd.generic_cmd6_time, 0,
1214                            false, true, MMC_CMD_RETRIES);
1215         if (err) {
1216                 pr_err("%s: switch to hs400 failed, err:%d\n",
1217                          mmc_hostname(host), err);
1218                 return err;
1219         }
1220
1221         /* Set host controller to HS400 timing and frequency */
1222         mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1223         mmc_set_bus_speed(card);
1224
1225         if (host->ops->hs400_complete)
1226                 host->ops->hs400_complete(host);
1227
1228         err = mmc_switch_status(card, true);
1229         if (err)
1230                 goto out_err;
1231
1232         return 0;
1233
1234 out_err:
1235         pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1236                __func__, err);
1237         return err;
1238 }
1239
1240 int mmc_hs200_to_hs400(struct mmc_card *card)
1241 {
1242         return mmc_select_hs400(card);
1243 }
1244
1245 int mmc_hs400_to_hs200(struct mmc_card *card)
1246 {
1247         struct mmc_host *host = card->host;
1248         unsigned int max_dtr;
1249         int err;
1250         u8 val;
1251
1252         /* Reduce frequency to HS */
1253         max_dtr = card->ext_csd.hs_max_dtr;
1254         mmc_set_clock(host, max_dtr);
1255
1256         /* Switch HS400 to HS DDR */
1257         val = EXT_CSD_TIMING_HS;
1258         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
1259                            val, card->ext_csd.generic_cmd6_time, 0,
1260                            false, true, MMC_CMD_RETRIES);
1261         if (err)
1262                 goto out_err;
1263
1264         if (host->ops->hs400_downgrade)
1265                 host->ops->hs400_downgrade(host);
1266
1267         mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
1268
1269         err = mmc_switch_status(card, true);
1270         if (err)
1271                 goto out_err;
1272
1273         /* Switch HS DDR to HS */
1274         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1275                            EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
1276                            0, false, true, MMC_CMD_RETRIES);
1277         if (err)
1278                 goto out_err;
1279
1280         mmc_set_timing(host, MMC_TIMING_MMC_HS);
1281
1282         err = mmc_switch_status(card, true);
1283         if (err)
1284                 goto out_err;
1285
1286         /* Switch HS to HS200 */
1287         val = EXT_CSD_TIMING_HS200 |
1288               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1289         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
1290                            val, card->ext_csd.generic_cmd6_time, 0,
1291                            false, true, MMC_CMD_RETRIES);
1292         if (err)
1293                 goto out_err;
1294
1295         mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1296
1297         /*
1298          * For HS200, CRC errors are not a reliable way to know the switch
1299          * failed. If there really is a problem, we would expect tuning to
1300          * fail and the end result to be the same.
1301          */
1302         err = mmc_switch_status(card, false);
1303         if (err)
1304                 goto out_err;
1305
1306         mmc_set_bus_speed(card);
1307
1308         /* Prepare tuning for HS400 mode. */
1309         if (host->ops->prepare_hs400_tuning)
1310                 host->ops->prepare_hs400_tuning(host, &host->ios);
1311
1312         return 0;
1313
1314 out_err:
1315         pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1316                __func__, err);
1317         return err;
1318 }
1319
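/*
 * Pick a drive strength for HS200/HS400 from the types the card advertises
 * in EXT_CSD. A valid host->fixed_drv_type takes precedence when the card
 * supports it; otherwise mmc_select_drive_strength() lets the host choose.
 */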
1320 static void mmc_select_driver_type(struct mmc_card *card)
1321 {
1322         int card_drv_type, drive_strength, drv_type = 0;
1323         int fixed_drv_type = card->host->fixed_drv_type;
1324
1325         card_drv_type = card->ext_csd.raw_driver_strength |
1326                         mmc_driver_type_mask(0);
1327
1328         if (fixed_drv_type >= 0)
1329                 drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
1330                                  ? fixed_drv_type : 0;
1331         else
1332                 drive_strength = mmc_select_drive_strength(card,
1333                                                            card->ext_csd.hs200_max_dtr,
1334                                                            card_drv_type, &drv_type);
1335
1336         card->drive_strength = drive_strength;
1337
1338         if (drv_type)
1339                 mmc_set_driver_type(card->host, drv_type);
1340 }
1341
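/*
 * Select HS400 with Enhanced Strobe. Unlike plain HS400, no tuning is
 * required: the card is taken via HS to 8-bit DDR with the strobe bit set
 * in EXT_CSD_BUS_WIDTH, and the host enables enhanced strobe through its
 * ->hs400_enhanced_strobe callback.
 */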
1342 static int mmc_select_hs400es(struct mmc_card *card)
1343 {
1344         struct mmc_host *host = card->host;
1345         int err = -EINVAL;
1346         u8 val;
1347
1348         if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
1349                 err = -ENOTSUPP;
1350                 goto out_err;
1351         }
1352
1353         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
1354                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1355
1356         if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
1357                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1358
1359         /* If this fails, try again during the next card power cycle */
1360         if (err)
1361                 goto out_err;
1362
1363         err = mmc_select_bus_width(card);
1364         if (err != MMC_BUS_WIDTH_8) {
1365                 pr_err("%s: switch to 8bit bus width failed, err:%d\n",
1366                         mmc_hostname(host), err);
1367                 err = err < 0 ? err : -ENOTSUPP;
1368                 goto out_err;
1369         }
1370
1371         /* Switch card to HS mode */
1372         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1373                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1374                            card->ext_csd.generic_cmd6_time, 0,
1375                            false, true, MMC_CMD_RETRIES);
1376         if (err) {
1377                 pr_err("%s: switch to hs for hs400es failed, err:%d\n",
1378                         mmc_hostname(host), err);
1379                 goto out_err;
1380         }
1381
1382         mmc_set_timing(host, MMC_TIMING_MMC_HS);
1383         err = mmc_switch_status(card, true);
1384         if (err)
1385                 goto out_err;
1386
1387         mmc_set_clock(host, card->ext_csd.hs_max_dtr);
1388
1389         /* Switch card to DDR with strobe bit */
1390         val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
1391         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1392                          EXT_CSD_BUS_WIDTH,
1393                          val,
1394                          card->ext_csd.generic_cmd6_time);
1395         if (err) {
1396                 pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
1397                         mmc_hostname(host), err);
1398                 goto out_err;
1399         }
1400
1401         mmc_select_driver_type(card);
1402
1403         /* Switch card to HS400 */
1404         val = EXT_CSD_TIMING_HS400 |
1405               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1406         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1407                            EXT_CSD_HS_TIMING, val,
1408                            card->ext_csd.generic_cmd6_time, 0,
1409                            false, true, MMC_CMD_RETRIES);
1410         if (err) {
1411                 pr_err("%s: switch to hs400es failed, err:%d\n",
1412                         mmc_hostname(host), err);
1413                 goto out_err;
1414         }
1415
1416         /* Set host controller to HS400 timing and frequency */
1417         mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1418
1419         /* Enable the controller's enhanced strobe function */
1420         host->ios.enhanced_strobe = true;
1421         if (host->ops->hs400_enhanced_strobe)
1422                 host->ops->hs400_enhanced_strobe(host, &host->ios);
1423
1424         err = mmc_switch_status(card, true);
1425         if (err)
1426                 goto out_err;
1427
1428         return 0;
1429
1430 out_err:
1431         pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1432                __func__, err);
1433         return err;
1434 }
1435
1436 /*
1437  * For devices supporting HS200 mode, the following sequence
1438  * should be done before executing the tuning process:
1439  * 1. set the desired bus width (4-bit or 8-bit; 1-bit is not supported)
1440  * 2. switch to HS200 mode
1441  * 3. set the clock to > 52MHz and <= 200MHz
1442  */
1443 static int mmc_select_hs200(struct mmc_card *card)
1444 {
1445         struct mmc_host *host = card->host;
1446         unsigned int old_timing, old_signal_voltage;
1447         int err = -EINVAL;
1448         u8 val;
1449
1450         old_signal_voltage = host->ios.signal_voltage;
1451         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1452                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1453
1454         if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1455                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1456
1457         /* If this fails, try again during the next card power cycle */
1458         if (err)
1459                 return err;
1460
1461         mmc_select_driver_type(card);
1462
1463         /*
1464          * Set the bus width (4 or 8) according to the host's support and
1465          * switch to HS200 mode if the bus width is set successfully.
1466          */
1467         err = mmc_select_bus_width(card);
1468         if (err > 0) {
1469                 val = EXT_CSD_TIMING_HS200 |
1470                       card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1471                 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1472                                    EXT_CSD_HS_TIMING, val,
1473                                    card->ext_csd.generic_cmd6_time, 0,
1474                                    false, true, MMC_CMD_RETRIES);
1475                 if (err)
1476                         goto err;
1477                 old_timing = host->ios.timing;
1478                 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1479
1480                 /*
1481                  * For HS200, CRC errors are not a reliable way to know the
1482                  * switch failed. If there really is a problem, we would expect
1483                  * tuning to fail and the end result to be the same.
1484                  */
1485                 err = mmc_switch_status(card, false);
1486
1487                 /*
1488                  * mmc_select_timing() assumes the timing has not changed if
1489                  * the error is a switch error.
1490                  */
1491                 if (err == -EBADMSG)
1492                         mmc_set_timing(host, old_timing);
1493         }
1494 err:
1495         if (err) {
1496                 /* Fall back to the old signal voltage; if that fails, report an error */
1497                 if (mmc_set_signal_voltage(host, old_signal_voltage))
1498                         err = -EIO;
1499
1500                 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1501                        __func__, err);
1502         }
1503         return err;
1504 }
1505
1506 /*
1507  * Activate High Speed, HS200 or HS400ES mode if supported.
1508  */
1509 static int mmc_select_timing(struct mmc_card *card)
1510 {
1511         int err = 0;
1512
1513         if (!mmc_can_ext_csd(card))
1514                 goto bus_speed;
1515
1516         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
1517                 err = mmc_select_hs400es(card);
1518         else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
1519                 err = mmc_select_hs200(card);
1520         else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1521                 err = mmc_select_hs(card);
1522
1523         if (err && err != -EBADMSG)
1524                 return err;
1525
1526 bus_speed:
1527         /*
1528          * Set the bus speed to the selected bus timing.
1529          * If no timing was selected, backward-compatible (legacy) timing is the default.
1530          */
1531         mmc_set_bus_speed(card);
1532         return 0;
1533 }
1534
1535 /*
1536  * Execute tuning sequence to seek the proper bus operating
1537  * conditions for HS200 and HS400, which sends CMD21 to the device.
1538  */
1539 static int mmc_hs200_tuning(struct mmc_card *card)
1540 {
1541         struct mmc_host *host = card->host;
1542
1543         /*
1544          * Timing should be adjusted to the HS400 target
1545          * operating frequency for the tuning process.
1546          */
1547         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1548             host->ios.bus_width == MMC_BUS_WIDTH_8)
1549                 if (host->ops->prepare_hs400_tuning)
1550                         host->ops->prepare_hs400_tuning(host, &host->ios);
1551
1552         return mmc_execute_tuning(card);
1553 }
1554
1555 /*
1556  * Handle the detection and initialisation of a card.
1557  *
1558  * In the case of a resume, "oldcard" will contain the card
1559  * we're trying to reinitialise.
1560  */
1561 static int mmc_init_card(struct mmc_host *host, u32 ocr,
1562         struct mmc_card *oldcard)
1563 {
1564         struct mmc_card *card;
1565         int err;
1566         u32 cid[4];
1567         u32 rocr;
1568
1569         WARN_ON(!host->claimed);
1570
1571         /* Set correct bus mode for MMC before attempting init */
1572         if (!mmc_host_is_spi(host))
1573                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1574
1575         /*
1576          * Since we're changing the OCR value, we seem to
1577          * need to tell some cards to go back to the idle
1578          * state.  We wait 1ms to give cards time to
1579          * respond.
1580          * mmc_go_idle is needed for eMMCs that are asleep.
1581          */
1582         mmc_go_idle(host);
1583
1584         /* The extra bit indicates that we support high capacity */
1585         err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
1586         if (err)
1587                 goto err;
1588
1589         /*
1590          * For SPI, enable CRC as appropriate.
1591          */
1592         if (mmc_host_is_spi(host)) {
1593                 err = mmc_spi_set_crc(host, use_spi_crc);
1594                 if (err)
1595                         goto err;
1596         }
1597
1598         /*
1599          * Fetch CID from card.
1600          */
1601         err = mmc_send_cid(host, cid);
1602         if (err)
1603                 goto err;
1604
1605         if (oldcard) {
1606                 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
1607                         pr_debug("%s: Perhaps the card was replaced\n",
1608                                 mmc_hostname(host));
1609                         err = -ENOENT;
1610                         goto err;
1611                 }
1612
1613                 card = oldcard;
1614         } else {
1615                 /*
1616                  * Allocate card structure.
1617                  */
1618                 card = mmc_alloc_card(host, &mmc_type);
1619                 if (IS_ERR(card)) {
1620                         err = PTR_ERR(card);
1621                         goto err;
1622                 }
1623
1624                 card->ocr = ocr;
1625                 card->type = MMC_TYPE_MMC;
1626                 card->rca = 1;
1627                 memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
1628         }
1629
1630         /*
1631          * Call the optional HC's init_card function to handle quirks.
1632          */
1633         if (host->ops->init_card)
1634                 host->ops->init_card(host, card);
1635
1636         /*
1637          * For native buses: set the card's RCA and exit open-drain mode.
1638          */
1639         if (!mmc_host_is_spi(host)) {
1640                 err = mmc_set_relative_addr(card);
1641                 if (err)
1642                         goto free_card;
1643
1644                 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1645         }
1646
1647         if (!oldcard) {
1648                 /*
1649                  * Fetch CSD from card.
1650                  */
1651                 err = mmc_send_csd(card, card->raw_csd);
1652                 if (err)
1653                         goto free_card;
1654
1655                 err = mmc_decode_csd(card);
1656                 if (err)
1657                         goto free_card;
1658                 err = mmc_decode_cid(card);
1659                 if (err)
1660                         goto free_card;
1661         }
1662
1663         /*
1664          * Handle DSR only for cards that support it and hosts requesting
1665          * DSR configuration.
1666          */
1667         if (card->csd.dsr_imp && host->dsr_req)
1668                 mmc_set_dsr(host);
1669
1670         /*
1671          * Select card, as all following commands rely on that.
1672          */
1673         if (!mmc_host_is_spi(host)) {
1674                 err = mmc_select_card(card);
1675                 if (err)
1676                         goto free_card;
1677         }
1678
1679         if (!oldcard) {
1680                 /* Read extended CSD. */
1681                 err = mmc_read_ext_csd(card);
1682                 if (err)
1683                         goto free_card;
1684
1685                 /*
1686                  * If doing byte addressing, check whether sector addressing is
1687                  * required instead. Handle the case of <2GB cards needing sector
1688                  * addressing. See section 8.1 of JEDEC Standard JESD84-A441;
1689                  * the OCR register has bit 30 set for sector addressing.
1690                  */
1691                 if (rocr & BIT(30))
1692                         mmc_card_set_blockaddr(card);
1693
1694                 /* Erase size depends on CSD and Extended CSD */
1695                 mmc_set_erase_size(card);
1696         }
1697
1698         /* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
1699         if (card->ext_csd.rev >= 3) {
1700                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1701                                  EXT_CSD_ERASE_GROUP_DEF, 1,
1702                                  card->ext_csd.generic_cmd6_time);
1703
1704                 if (err && err != -EBADMSG)
1705                         goto free_card;
1706
1707                 if (err) {
1708                         /*
1709                          * Just disable enhanced area off & sz; we
1710                          * will try to enable ERASE_GROUP_DEF again
1711                          * during the next reinit.
1712                          */
1713                         card->ext_csd.enhanced_area_offset = -EINVAL;
1714                         card->ext_csd.enhanced_area_size = -EINVAL;
1715                 } else {
1716                         card->ext_csd.erase_group_def = 1;
1717                         /*
1718                          * ERASE_GRP_DEF was enabled successfully.
1719                          * This affects the erase size, so the
1720                          * erase size needs to be reset here.
1721                          */
1722                         mmc_set_erase_size(card);
1723                 }
1724         }
1725
1726         /*
1727          * Ensure eMMC user default partition is enabled
1728          */
1729         if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
1730                 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1731                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
1732                                  card->ext_csd.part_config,
1733                                  card->ext_csd.part_time);
1734                 if (err && err != -EBADMSG)
1735                         goto free_card;
1736         }
1737
1738         /*
1739          * Enable the power_off_notification byte in the ext_csd register.
1740          */
1741         if (card->ext_csd.rev >= 6) {
1742                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1743                                  EXT_CSD_POWER_OFF_NOTIFICATION,
1744                                  EXT_CSD_POWER_ON,
1745                                  card->ext_csd.generic_cmd6_time);
1746                 if (err && err != -EBADMSG)
1747                         goto free_card;
1748
1749                 /*
1750                  * At this point err is either -EBADMSG or 0,
1751                  * so check for success and update the flag.
1752                  */
1753                 if (!err)
1754                         card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1755         }
1756
1757         /* set erase_arg */
1758         if (mmc_can_discard(card))
1759                 card->erase_arg = MMC_DISCARD_ARG;
1760         else if (mmc_can_trim(card))
1761                 card->erase_arg = MMC_TRIM_ARG;
1762         else
1763                 card->erase_arg = MMC_ERASE_ARG;
1764
1765         /*
1766          * Select timing interface
1767          */
1768         err = mmc_select_timing(card);
1769         if (err)
1770                 goto free_card;
1771
1772         if (mmc_card_hs200(card)) {
1773                 host->doing_init_tune = 1;
1774
1775                 err = mmc_hs200_tuning(card);
1776                 if (!err)
1777                         err = mmc_select_hs400(card);
1778
1779                 host->doing_init_tune = 0;
1780
1781                 if (err)
1782                         goto free_card;
1783
1784         } else if (!mmc_card_hs400es(card)) {
1785                 /* Optionally select the desired bus width */
1786                 err = mmc_select_bus_width(card);
1787                 if (err > 0 && mmc_card_hs(card)) {
1788                         err = mmc_select_hs_ddr(card);
1789                         if (err)
1790                                 goto free_card;
1791                 }
1792         }
1793
1794         /*
1795          * Choose the power class for the selected bus interface.
1796          */
1797         mmc_select_powerclass(card);
1798
1799         /*
1800          * Enable HPI feature (if supported)
1801          */
1802         if (card->ext_csd.hpi) {
1803                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1804                                 EXT_CSD_HPI_MGMT, 1,
1805                                 card->ext_csd.generic_cmd6_time);
1806                 if (err && err != -EBADMSG)
1807                         goto free_card;
1808                 if (err) {
1809                         pr_warn("%s: Enabling HPI failed\n",
1810                                 mmc_hostname(card->host));
1811                         card->ext_csd.hpi_en = 0;
1812                 } else {
1813                         card->ext_csd.hpi_en = 1;
1814                 }
1815         }
1816
1817         /*
1818          * If the cache size is greater than 0, the card has a cache that can be
1819          * turned on. Note that some eMMCs from Micron have been reported to need
1820          * a ~800 ms timeout while enabling the cache after sudden power failure
1821          * tests. Let's extend the timeout to a minimum of MIN_CACHE_EN_TIMEOUT_MS
1822          * and do it for all cards.
1823          */
1824         if (card->ext_csd.cache_size > 0) {
1825                 unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
1826
1827                 timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
1828                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1829                                 EXT_CSD_CACHE_CTRL, 1, timeout_ms);
1830                 if (err && err != -EBADMSG)
1831                         goto free_card;
1832
1833                 /*
1834                  * The cache is turned on successfully only if there was no error.
1835                  */
1836                 if (err) {
1837                         pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
1838                                 mmc_hostname(card->host), err);
1839                         card->ext_csd.cache_ctrl = 0;
1840                 } else {
1841                         card->ext_csd.cache_ctrl = 1;
1842                 }
1843         }
1844
1845         /*
1846          * Enable Command Queue if supported. Note that Packed Commands cannot
1847          * be used with Command Queue.
1848          */
1849         card->ext_csd.cmdq_en = false;
1850         if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
1851                 err = mmc_cmdq_enable(card);
1852                 if (err && err != -EBADMSG)
1853                         goto free_card;
1854                 if (err) {
1855                         pr_warn("%s: Enabling CMDQ failed\n",
1856                                 mmc_hostname(card->host));
1857                         card->ext_csd.cmdq_support = false;
1858                         card->ext_csd.cmdq_depth = 0;
1859                 }
1860         }
1861         /*
1862          * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
1863          * disabled temporarily, so a flag is needed to indicate that the
1864          * Command Queue should be re-enabled afterwards.
1865          */
1866         card->reenable_cmdq = card->ext_csd.cmdq_en;
1867
1868         if (host->cqe_ops && !host->cqe_enabled) {
1869                 err = host->cqe_ops->cqe_enable(host, card);
1870                 if (!err) {
1871                         host->cqe_enabled = true;
1872
1873                         if (card->ext_csd.cmdq_en) {
1874                                 pr_info("%s: Command Queue Engine enabled\n",
1875                                         mmc_hostname(host));
1876                         } else {
1877                                 host->hsq_enabled = true;
1878                                 pr_info("%s: Host Software Queue enabled\n",
1879                                         mmc_hostname(host));
1880                         }
1881                 }
1882         }
1883
1884         if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
1885             host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1886                 pr_err("%s: Host failed to negotiate down from 3.3V\n",
1887                         mmc_hostname(host));
1888                 err = -EINVAL;
1889                 goto free_card;
1890         }
1891
1892         if (!oldcard)
1893                 host->card = card;
1894
1895         return 0;
1896
1897 free_card:
1898         if (!oldcard)
1899                 mmc_remove_card(card);
1900 err:
1901         return err;
1902 }
1903
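/* Sleep (CMD5) is supported from EXT_CSD revision 3 (eMMC 4.3) onwards. */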
1904 static int mmc_can_sleep(struct mmc_card *card)
1905 {
1906         return card->ext_csd.rev >= 3;
1907 }
1908
1909 static int mmc_sleep_busy_cb(void *cb_data, bool *busy)
1910 {
1911         struct mmc_host *host = cb_data;
1912
1913         *busy = host->ops->card_busy(host);
1914         return 0;
1915 }
1916
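/*
 * Put the card to sleep with CMD5 (SLEEP_AWAKE) after deselecting it. The
 * timeout comes from the EXT_CSD S_A_TIMEOUT field (100ns units, hence the
 * division by 10000 to get milliseconds). Busy signalling is handled by the
 * host (R1B), by polling ->card_busy, or by simply waiting out the timeout.
 */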
1917 static int mmc_sleep(struct mmc_host *host)
1918 {
1919         struct mmc_command cmd = {};
1920         struct mmc_card *card = host->card;
1921         unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
1922         bool use_r1b_resp;
1923         int err;
1924
1925         /* Re-tuning can't be done once the card is deselected */
1926         mmc_retune_hold(host);
1927
1928         err = mmc_deselect_cards(host);
1929         if (err)
1930                 goto out_release;
1931
1932         cmd.opcode = MMC_SLEEP_AWAKE;
1933         cmd.arg = card->rca << 16;
1934         cmd.arg |= 1 << 15;
1935         use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
1936
1937         err = mmc_wait_for_cmd(host, &cmd, 0);
1938         if (err)
1939                 goto out_release;
1940
1941         /*
1942          * If the host does not wait while the card signals busy, then we can
1943          * try to poll, but only if the host supports HW polling, as the
1944          * SEND_STATUS cmd is not allowed. If we can't poll, then we simply need
1945          * to wait for the sleep/awake timeout.
1946          */
1947         if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
1948                 goto out_release;
1949
1950         if (!host->ops->card_busy) {
1951                 mmc_delay(timeout_ms);
1952                 goto out_release;
1953         }
1954
1955         err = __mmc_poll_for_busy(card, timeout_ms, &mmc_sleep_busy_cb, host);
1956
1957 out_release:
1958         mmc_retune_release(host);
1959         return err;
1960 }
1961
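/* Power Off Notification is only usable if it was enabled during init. */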
1962 static int mmc_can_poweroff_notify(const struct mmc_card *card)
1963 {
1964         return card &&
1965                 mmc_card_mmc(card) &&
1966                 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
1967 }
1968
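/*
 * Notify the card that power is about to be removed. POWER_OFF_LONG uses
 * the card's power_off_longtime instead of the generic CMD6 timeout.
 */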
1969 static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1970 {
1971         unsigned int timeout = card->ext_csd.generic_cmd6_time;
1972         int err;
1973
1974         /* Use EXT_CSD_POWER_OFF_SHORT as the default notification type. */
1975         if (notify_type == EXT_CSD_POWER_OFF_LONG)
1976                 timeout = card->ext_csd.power_off_longtime;
1977
1978         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1979                         EXT_CSD_POWER_OFF_NOTIFICATION,
1980                         notify_type, timeout, 0, false, false, MMC_CMD_RETRIES);
1981         if (err)
1982                 pr_err("%s: Power Off Notification timed out, %u\n",
1983                        mmc_hostname(card->host), timeout);
1984
1985         /* Disable the power off notification after the switch operation. */
1986         card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
1987
1988         return err;
1989 }
1990
1991 /*
1992  * Host is being removed. Free up the current card.
1993  */
1994 static void mmc_remove(struct mmc_host *host)
1995 {
1996         mmc_remove_card(host->card);
1997         host->card = NULL;
1998 }
1999
2000 /*
2001  * Card detection - card is alive.
2002  */
2003 static int mmc_alive(struct mmc_host *host)
2004 {
2005         return mmc_send_status(host->card, NULL);
2006 }
2007
2008 /*
2009  * Card detection callback from host.
2010  */
2011 static void mmc_detect(struct mmc_host *host)
2012 {
2013         int err;
2014
2015         mmc_get_card(host->card, NULL);
2016
2017         /*
2018          * Just check if our card has been removed.
2019          */
2020         err = _mmc_detect_card_removed(host);
2021
2022         mmc_put_card(host->card, NULL);
2023
2024         if (err) {
2025                 mmc_remove(host);
2026
2027                 mmc_claim_host(host);
2028                 mmc_detach_bus(host);
2029                 mmc_power_off(host);
2030                 mmc_release_host(host);
2031         }
2032 }
2033
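/* The cache is usable only if the card has one and CACHE_CTRL is on. */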
2034 static bool _mmc_cache_enabled(struct mmc_host *host)
2035 {
2036         return host->card->ext_csd.cache_size > 0 &&
2037                host->card->ext_csd.cache_ctrl & 1;
2038 }
2039
2040 /*
2041  * Flush the internal cache of the eMMC to non-volatile storage.
2042  */
2043 static int _mmc_flush_cache(struct mmc_host *host)
2044 {
2045         int err = 0;
2046
2047         if (_mmc_cache_enabled(host)) {
2048                 err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
2049                                  EXT_CSD_FLUSH_CACHE, 1,
2050                                  CACHE_FLUSH_TIMEOUT_MS);
2051                 if (err)
2052                         pr_err("%s: cache flush error %d\n",
2053                                mmc_hostname(host), err);
2054         }
2055
2056         return err;
2057 }
2058
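/*
 * Power down the card for suspend or shutdown. The cache is flushed first.
 * Power Off Notification is used when a full power cycle is possible (or at
 * shutdown); otherwise the card is put to sleep or simply deselected before
 * power is cut.
 */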
2059 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
2060 {
2061         int err = 0;
2062         unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
2063                                         EXT_CSD_POWER_OFF_LONG;
2064
2065         mmc_claim_host(host);
2066
2067         if (mmc_card_suspended(host->card))
2068                 goto out;
2069
2070         err = _mmc_flush_cache(host);
2071         if (err)
2072                 goto out;
2073
2074         if (mmc_can_poweroff_notify(host->card) &&
2075             ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend ||
2076              (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND)))
2077                 err = mmc_poweroff_notify(host->card, notify_type);
2078         else if (mmc_can_sleep(host->card))
2079                 err = mmc_sleep(host);
2080         else if (!mmc_host_is_spi(host))
2081                 err = mmc_deselect_cards(host);
2082
2083         if (!err) {
2084                 mmc_power_off(host);
2085                 mmc_card_set_suspended(host->card);
2086         }
2087 out:
2088         mmc_release_host(host);
2089         return err;
2090 }
2091
2092 /*
2093  * Suspend callback
2094  */
2095 static int mmc_suspend(struct mmc_host *host)
2096 {
2097         int err;
2098
2099         err = _mmc_suspend(host, true);
2100         if (!err) {
2101                 pm_runtime_disable(&host->card->dev);
2102                 pm_runtime_set_suspended(&host->card->dev);
2103         }
2104
2105         return err;
2106 }
2107
2108 /*
2109  * This function tries to determine if the same card is still present
2110  * and, if so, restore all state to it.
2111  */
2112 static int _mmc_resume(struct mmc_host *host)
2113 {
2114         int err = 0;
2115
2116         mmc_claim_host(host);
2117
2118         if (!mmc_card_suspended(host->card))
2119                 goto out;
2120
2121         mmc_power_up(host, host->card->ocr);
2122         err = mmc_init_card(host, host->card->ocr, host->card);
2123         mmc_card_clr_suspended(host->card);
2124
2125 out:
2126         mmc_release_host(host);
2127         return err;
2128 }
2129
2130 /*
2131  * Shutdown callback
2132  */
2133 static int mmc_shutdown(struct mmc_host *host)
2134 {
2135         int err = 0;
2136
2137         /*
2138          * In the specific case of poweroff notify, we need to resume the card
2139          * before we can shut it down properly.
2140          */
2141         if (mmc_can_poweroff_notify(host->card) &&
2142                 !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
2143                 err = _mmc_resume(host);
2144
2145         if (!err)
2146                 err = _mmc_suspend(host, false);
2147
2148         return err;
2149 }
2150
2151 /*
2152  * Callback for resume.
2153  */
2154 static int mmc_resume(struct mmc_host *host)
2155 {
2156         pm_runtime_enable(&host->card->dev);
2157         return 0;
2158 }
2159
2160 /*
2161  * Callback for runtime_suspend.
2162  */
2163 static int mmc_runtime_suspend(struct mmc_host *host)
2164 {
2165         int err;
2166
2167         if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
2168                 return 0;
2169
2170         err = _mmc_suspend(host, true);
2171         if (err)
2172                 pr_err("%s: error %d doing aggressive suspend\n",
2173                         mmc_hostname(host), err);
2174
2175         return err;
2176 }
2177
2178 /*
2179  * Callback for runtime_resume.
2180  */
2181 static int mmc_runtime_resume(struct mmc_host *host)
2182 {
2183         int err;
2184
2185         err = _mmc_resume(host);
2186         if (err && err != -ENOMEDIUM)
2187                 pr_err("%s: error %d doing runtime resume\n",
2188                         mmc_hostname(host), err);
2189
2190         return 0;
2191 }
2192
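/* RST_n can only be used if it has been enabled in EXT_CSD RST_N_FUNCTION. */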
2193 static int mmc_can_reset(struct mmc_card *card)
2194 {
2195         u8 rst_n_function;
2196
2197         rst_n_function = card->ext_csd.rst_n_function;
2198         if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
2199                 return 0;
2200         return 1;
2201 }
2202
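/*
 * Reset the card: flush the cache (best effort), then either assert RST_n
 * via the host's ->hw_reset callback or brute-force a power cycle, and
 * finally reinitialise the card.
 */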
2203 static int _mmc_hw_reset(struct mmc_host *host)
2204 {
2205         struct mmc_card *card = host->card;
2206
2207         /*
2208          * In the case of recovery, we can't expect flushing the cache to always
2209          * work, but we give it a go and ignore errors.
2210          */
2211         _mmc_flush_cache(host);
2212
2213         if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
2214              mmc_can_reset(card)) {
2215                 /* If the card accepts the RST_n signal, send it. */
2216                 mmc_set_clock(host, host->f_init);
2217                 host->ops->hw_reset(host);
2218                 /* Set initial state and call mmc_set_ios */
2219                 mmc_set_initial_state(host);
2220         } else {
2221                 /* Do a brute force power cycle */
2222                 mmc_power_cycle(host, card->ocr);
2223                 mmc_pwrseq_reset(host);
2224         }
2225         return mmc_init_card(host, card->ocr, card);
2226 }
2227
2228 static const struct mmc_bus_ops mmc_ops = {
2229         .remove = mmc_remove,
2230         .detect = mmc_detect,
2231         .suspend = mmc_suspend,
2232         .resume = mmc_resume,
2233         .runtime_suspend = mmc_runtime_suspend,
2234         .runtime_resume = mmc_runtime_resume,
2235         .alive = mmc_alive,
2236         .shutdown = mmc_shutdown,
2237         .hw_reset = _mmc_hw_reset,
2238         .cache_enabled = _mmc_cache_enabled,
2239         .flush_cache = _mmc_flush_cache,
2240 };
2241
2242 /*
2243  * Starting point for MMC card init.
2244  */
2245 int mmc_attach_mmc(struct mmc_host *host)
2246 {
2247         int err;
2248         u32 ocr, rocr;
2249
2250         WARN_ON(!host->claimed);
2251
2252         /* Set correct bus mode for MMC before attempting attach */
2253         if (!mmc_host_is_spi(host))
2254                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
2255
2256         err = mmc_send_op_cond(host, 0, &ocr);
2257         if (err)
2258                 return err;
2259
2260         mmc_attach_bus(host, &mmc_ops);
2261         if (host->ocr_avail_mmc)
2262                 host->ocr_avail = host->ocr_avail_mmc;
2263
2264         /*
2265          * We need to get OCR a different way for SPI.
2266          */
2267         if (mmc_host_is_spi(host)) {
2268                 err = mmc_spi_read_ocr(host, 1, &ocr);
2269                 if (err)
2270                         goto err;
2271         }
2272
2273         rocr = mmc_select_voltage(host, ocr);
2274
2275         /*
2276          * Can we support the voltage of the card?
2277          */
2278         if (!rocr) {
2279                 err = -EINVAL;
2280                 goto err;
2281         }
2282
2283         /*
2284          * Detect and init the card.
2285          */
2286         err = mmc_init_card(host, rocr, NULL);
2287         if (err)
2288                 goto err;
2289
2290         mmc_release_host(host);
2291         err = mmc_add_card(host->card);
2292         if (err)
2293                 goto remove_card;
2294
2295         mmc_claim_host(host);
2296         return 0;
2297
2298 remove_card:
2299         mmc_remove_card(host->card);
2300         mmc_claim_host(host);
2301         host->card = NULL;
2302 err:
2303         mmc_detach_bus(host);
2304
2305         pr_err("%s: error %d whilst initialising MMC card\n",
2306                 mmc_hostname(host), err);
2307
2308         return err;
2309 }