drivers/mmc/core/block.c
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
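
/*
 * The MMC_SWITCH (CMD6) write-byte argument is laid out as
 * (access << 24) | (index << 16) | (value << 8) | cmd_set, so the
 * EXT_CSD index sits in bits [23:16]. Worked example: starting a
 * sanitize writes index EXT_CSD_SANITIZE_START (165 == 0xA5), giving
 *
 *     arg = 0x03A50100;
 *     MMC_EXTRACT_INDEX_FROM_ARG(arg) == 0xA5
 *
 * which is how __mmc_blk_ioctl_cmd() below spots sanitize requests
 * coming in through the ioctl() path.
 */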

#define mmc_req_rel_wr(req)     ((req->cmd_flags & REQ_FUA) && \
                                  (rq_data_dir(req) == WRITE))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256
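
/*
 * Worked example: with the default CONFIG_MMC_BLOCK_MINORS of 8, the
 * 20-bit minor space alone would allow (1 << 20) / 8 = 131072 devices,
 * so in practice the MAX_DEVICES cap above is what limits us.
 */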

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct device   *parent;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;
        struct list_head rpmbs;

        unsigned int    flags;
#define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    reset_done;
#define MMC_BLK_READ            BIT(0)
#define MMC_BLK_WRITE           BIT(1)
#define MMC_BLK_DISCARD         BIT(2)
#define MMC_BLK_SECDISCARD      BIT(3)

        /*
         * Only set in the main mmc_blk_data associated
         * with the mmc_card via dev_set_drvdata; keeps
         * track of the currently selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
        struct device_attribute power_ro_lock;
        int     area_type;
};

/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static struct bus_type mmc_rpmb_bus_type = {
        .name = "mmc_rpmb",
};

/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 for the first RPMB partition)
 * @md: parent MMC block device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
        struct device dev;
        struct cdev chrdev;
        int id;
        unsigned int part_index;
        struct mmc_blk_data *md;
        struct list_head node;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      unsigned int part_type);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devidx = disk->first_minor / perdev_minors;
        return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);
                ida_simple_remove(&mmc_blk_ida, devidx);
                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        struct mmc_card *card = md->queue.card;
        int locked = 0;

        if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
                locked = 2;
        else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
                locked = 1;

        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

        mmc_blk_put(md);

        return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        int ret;
        struct mmc_blk_data *md, *part_md;
        struct mmc_queue *mq;
        struct request *req;
        unsigned long set;

        if (kstrtoul(buf, 0, &set))
                return -EINVAL;

        if (set != 1)
                return count;

        md = mmc_blk_get(dev_to_disk(dev));
        mq = &md->queue;

        /* Dispatch locking to the block layer */
        req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;

        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
                        md->disk->disk_name);
                set_disk_ro(md->disk, 1);

                list_for_each_entry(part_md, &md->part, part)
                        if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
                                pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
                                set_disk_ro(part_md->disk, 1);
                        }
        }

        mmc_blk_put(md);
        return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        ret = snprintf(buf, PAGE_SIZE, "%d\n",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);
        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}
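
/*
 * The geometry above is synthetic: MMC/SD media has no real
 * cylinder/head/sector layout, so we advertise a fixed 4 heads x 16
 * sectors and derive the cylinder count from the capacity. For
 * instance, a card with 7744512 sectors (~3.7 GiB) reports
 * 7744512 / (4 * 16) = 121008 cylinders.
 */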

struct mmc_blk_ioc_data {
        struct mmc_ioc_cmd ic;
        unsigned char *buf;
        u64 buf_bytes;
        struct mmc_rpmb_data *rpmb;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
        struct mmc_ioc_cmd __user *user)
{
        struct mmc_blk_ioc_data *idata;
        int err;

        idata = kmalloc(sizeof(*idata), GFP_KERNEL);
        if (!idata) {
                err = -ENOMEM;
                goto out;
        }

        if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
                err = -EFAULT;
                goto idata_err;
        }

        idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
        if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
                err = -EOVERFLOW;
                goto idata_err;
        }

        if (!idata->buf_bytes) {
                idata->buf = NULL;
                return idata;
        }

        idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
                err = -ENOMEM;
                goto idata_err;
        }

        if (copy_from_user(idata->buf, (void __user *)(unsigned long)
                                        idata->ic.data_ptr, idata->buf_bytes)) {
                err = -EFAULT;
                goto copy_err;
        }

        return idata;

copy_err:
        kfree(idata->buf);
idata_err:
        kfree(idata);
out:
        return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
                                      struct mmc_blk_ioc_data *idata)
{
        struct mmc_ioc_cmd *ic = &idata->ic;

        if (copy_to_user(&(ic_ptr->response), ic->response,
                         sizeof(ic->response)))
                return -EFAULT;

        if (!idata->ic.write_flag) {
                if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
                                 idata->buf, idata->buf_bytes))
                        return -EFAULT;
        }

        return 0;
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
                                       u32 retries_max)
{
        int err;
        u32 retry_count = 0;

        if (!status || !retries_max)
                return -EINVAL;

        do {
                err = __mmc_send_status(card, status, 5);
                if (err)
                        break;

                if (!R1_STATUS(*status) &&
                                (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
                        break; /* RPMB programming operation complete */

                /*
                 * Reschedule to give the MMC device a chance to continue
                 * processing the previous command without being polled too
                 * frequently.
                 */
                usleep_range(1000, 5000);
        } while (++retry_count < retries_max);

        if (retry_count == retries_max)
                err = -EPERM;

        return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
        int err;

        if (!mmc_can_sanitize(card)) {
                pr_warn("%s: %s - SANITIZE is not supported\n",
                        mmc_hostname(card->host), __func__);
                err = -EOPNOTSUPP;
                goto out;
        }

        pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
                 mmc_hostname(card->host), __func__);

        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_SANITIZE_START, 1,
                         MMC_SANITIZE_REQ_TIMEOUT);

        if (err)
                pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
                       mmc_hostname(card->host), __func__, err);

        pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
                 __func__);
out:
        return err;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                               struct mmc_blk_ioc_data *idata)
{
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct mmc_request mrq = {};
        struct scatterlist sg;
        int err;
        unsigned int target_part;
        u32 status = 0;

        if (!card || !md || !idata)
                return -EINVAL;

        /*
         * RPMB accesses come in from the character device, so we
         * need to target these explicitly. Else we just target the
         * partition type for the block device the ioctl() was issued
         * on.
         */
        if (idata->rpmb) {
                /* Support multiple RPMB partitions */
                target_part = idata->rpmb->part_index;
                target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
        } else {
                target_part = md->part_type;
        }

        cmd.opcode = idata->ic.opcode;
        cmd.arg = idata->ic.arg;
        cmd.flags = idata->ic.flags;

        if (idata->buf_bytes) {
                data.sg = &sg;
                data.sg_len = 1;
                data.blksz = idata->ic.blksz;
                data.blocks = idata->ic.blocks;

                sg_init_one(data.sg, idata->buf, idata->buf_bytes);

                if (idata->ic.write_flag)
                        data.flags = MMC_DATA_WRITE;
                else
                        data.flags = MMC_DATA_READ;

                /* data.flags must already be set before doing this. */
                mmc_set_data_timeout(&data, card);

                /* Allow overriding the timeout_ns for empirical tuning. */
                if (idata->ic.data_timeout_ns)
                        data.timeout_ns = idata->ic.data_timeout_ns;

                if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
                        /*
                         * Pretend this is a data transfer and rely on the
                         * host driver to compute timeout.  When all host
                         * drivers support cmd.cmd_timeout for R1B, this
                         * can be changed to:
                         *
                         *     mrq.data = NULL;
                         *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
                         */
                        data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
                }

                mrq.data = &data;
        }

        mrq.cmd = &cmd;

        err = mmc_blk_part_switch(card, target_part);
        if (err)
                return err;

        if (idata->ic.is_acmd) {
                err = mmc_app_cmd(card->host, card);
                if (err)
                        return err;
        }

        if (idata->rpmb) {
                err = mmc_set_blockcount(card, data.blocks,
                        idata->ic.write_flag & (1 << 31));
                if (err)
                        return err;
        }

        if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
            (cmd.opcode == MMC_SWITCH)) {
                err = ioctl_do_sanitize(card);

                if (err)
                        pr_err("%s: ioctl_do_sanitize() failed. err = %d",
                               __func__, err);

                return err;
        }

        mmc_wait_for_req(card->host, &mrq);

        if (cmd.error) {
                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                                                __func__, cmd.error);
                return cmd.error;
        }
        if (data.error) {
                dev_err(mmc_dev(card->host), "%s: data error %d\n",
                                                __func__, data.error);
                return data.error;
        }

        /*
         * According to the SD specs, some commands require a delay after
         * issuing the command.
         */
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

        memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

        if (idata->rpmb) {
                /*
                 * Ensure RPMB command has completed by polling CMD13
                 * "Send Status".
                 */
                err = ioctl_rpmb_card_status_poll(card, &status, 5);
                if (err)
                        dev_err(mmc_dev(card->host),
                                        "%s: Card Status=0x%08X, error %d\n",
                                        __func__, status, err);
        }

        return err;
}

static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
                             struct mmc_ioc_cmd __user *ic_ptr,
                             struct mmc_rpmb_data *rpmb)
{
        struct mmc_blk_ioc_data *idata;
        struct mmc_blk_ioc_data *idatas[1];
        struct mmc_queue *mq;
        struct mmc_card *card;
        int err = 0, ioc_err = 0;
        struct request *req;

        idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
        if (IS_ERR(idata))
                return PTR_ERR(idata);
        /* This will be NULL on non-RPMB ioctls */
        idata->rpmb = rpmb;

        card = md->queue.card;
        if (IS_ERR(card)) {
                err = PTR_ERR(card);
                goto cmd_done;
        }

        /*
         * Dispatch the ioctl() into the block request queue.
         */
        mq = &md->queue;
        req = blk_get_request(mq->queue,
                idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
        idatas[0] = idata;
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
        req_to_mmc_queue_req(req)->drv_op_data = idatas;
        req_to_mmc_queue_req(req)->ioc_count = 1;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
        err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
        blk_put_request(req);

cmd_done:
        kfree(idata->buf);
        kfree(idata);
        return ioc_err ? ioc_err : err;
}
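
/*
 * Userspace reaches the function above through the MMC_IOC_CMD ioctl()
 * on the whole block device. A minimal sketch of a caller that reads
 * the 512-byte EXT_CSD register (CMD8, an ADTC read; error handling
 * elided, see mmc-utils for a complete version; the response-flag
 * values mirror the kernel's mmc headers):
 *
 *     struct mmc_ioc_cmd ic = {};
 *     __u8 ext_csd[512];
 *
 *     ic.opcode = 8;                  // MMC_SEND_EXT_CSD
 *     ic.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *     ic.blksz = 512;
 *     ic.blocks = 1;
 *     mmc_ioc_cmd_set_data(ic, ext_csd);
 *     ioctl(fd, MMC_IOC_CMD, &ic);    // fd opened on /dev/mmcblk0
 */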

static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
                                   struct mmc_ioc_multi_cmd __user *user,
                                   struct mmc_rpmb_data *rpmb)
{
        struct mmc_blk_ioc_data **idata = NULL;
        struct mmc_ioc_cmd __user *cmds = user->cmds;
        struct mmc_card *card;
        struct mmc_queue *mq;
        int i, err = 0, ioc_err = 0;
        __u64 num_of_cmds;
        struct request *req;

        if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
                           sizeof(num_of_cmds)))
                return -EFAULT;

        if (!num_of_cmds)
                return 0;

        if (num_of_cmds > MMC_IOC_MAX_CMDS)
                return -EINVAL;

        idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
        if (!idata)
                return -ENOMEM;

        for (i = 0; i < num_of_cmds; i++) {
                idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
                if (IS_ERR(idata[i])) {
                        err = PTR_ERR(idata[i]);
                        num_of_cmds = i;
                        goto cmd_err;
                }
                /* This will be NULL on non-RPMB ioctls */
                idata[i]->rpmb = rpmb;
        }

        card = md->queue.card;
        if (IS_ERR(card)) {
                err = PTR_ERR(card);
                goto cmd_err;
        }

        /*
         * Dispatch the ioctl()s into the block request queue.
         */
        mq = &md->queue;
        req = blk_get_request(mq->queue,
                idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                __GFP_RECLAIM);
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
        req_to_mmc_queue_req(req)->drv_op_data = idata;
        req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
        blk_execute_rq(mq->queue, NULL, req, 0);
        ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

        /* copy to user if data and response */
        for (i = 0; i < num_of_cmds && !err; i++)
                err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

        blk_put_request(req);

cmd_err:
        for (i = 0; i < num_of_cmds; i++) {
                kfree(idata[i]->buf);
                kfree(idata[i]);
        }
        kfree(idata);
        return ioc_err ? ioc_err : err;
}

static int mmc_blk_check_blkdev(struct block_device *bdev)
{
        /*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
                return -EPERM;
        return 0;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        struct mmc_blk_data *md;
        int ret;

        switch (cmd) {
        case MMC_IOC_CMD:
                ret = mmc_blk_check_blkdev(bdev);
                if (ret)
                        return ret;
                md = mmc_blk_get(bdev->bd_disk);
                if (!md)
                        return -EINVAL;
                ret = mmc_blk_ioctl_cmd(md,
                                        (struct mmc_ioc_cmd __user *)arg,
                                        NULL);
                mmc_blk_put(md);
                return ret;
        case MMC_IOC_MULTI_CMD:
                ret = mmc_blk_check_blkdev(bdev);
                if (ret)
                        return ret;
                md = mmc_blk_get(bdev->bd_disk);
                if (!md)
                        return -EINVAL;
                ret = mmc_blk_ioctl_multi_cmd(md,
                                        (struct mmc_ioc_multi_cmd __user *)arg,
                                        NULL);
                mmc_blk_put(md);
                return ret;
        default:
                return -EINVAL;
        }
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
        return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
        .ioctl                  = mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = mmc_blk_compat_ioctl,
#endif
};

static int mmc_blk_part_switch_pre(struct mmc_card *card,
                                   unsigned int part_type)
{
        int ret = 0;

        if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
                if (card->ext_csd.cmdq_en) {
                        ret = mmc_cmdq_disable(card);
                        if (ret)
                                return ret;
                }
                mmc_retune_pause(card->host);
        }

        return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
                                    unsigned int part_type)
{
        int ret = 0;

        if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
                mmc_retune_unpause(card->host);
                if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
                        ret = mmc_cmdq_enable(card);
        }

        return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      unsigned int part_type)
{
        int ret = 0;
        struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

        if (main_md->part_curr == part_type)
                return 0;

        if (mmc_card_mmc(card)) {
                u8 part_config = card->ext_csd.part_config;

                ret = mmc_blk_part_switch_pre(card, part_type);
                if (ret)
                        return ret;

                part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                part_config |= part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, part_config,
                                 card->ext_csd.part_time);
                if (ret) {
                        mmc_blk_part_switch_post(card, part_type);
                        return ret;
                }

                card->ext_csd.part_config = part_config;

                ret = mmc_blk_part_switch_post(card, main_md->part_curr);
        }

        main_md->part_curr = part_type;
        return ret;
}
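
/*
 * Example of the access-bit rewrite above: switching to R/W boot
 * partition 1 only replaces the PARTITION_ACCESS field of
 * EXT_CSD_PART_CONFIG, so a current part_config of 0x08 (boot 1
 * enabled for boot, user area selected for access) becomes 0x09
 * (PARTITION_ACCESS = 0x1).
 */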

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};

        struct scatterlist sg;

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return err;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return -EIO;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
        mmc_set_data_timeout(&data, card);

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return -ENOMEM;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                return -EIO;

        *written_blocks = result;

        return 0;
}

static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
                bool hw_busy_detect, struct request *req, bool *gen_err)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
        int err = 0;
        u32 status;

        do {
                err = __mmc_send_status(card, &status, 5);
                if (err) {
                        pr_err("%s: error %d requesting status\n",
                               req->rq_disk->disk_name, err);
                        return err;
                }

                if (status & R1_ERROR) {
                        pr_err("%s: %s: error sending status cmd, status %#x\n",
                                req->rq_disk->disk_name, __func__, status);
                        *gen_err = true;
                }

                /* We may rely on the host hw to handle busy detection. */
                if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
                        hw_busy_detect)
                        break;

                /*
                 * Timeout if the device never becomes ready for data and never
                 * leaves the program state.
                 */
                if (time_after(jiffies, timeout)) {
                        pr_err("%s: Card stuck in programming state! %s %s\n",
                                mmc_hostname(card->host),
                                req->rq_disk->disk_name, __func__);
                        return -ETIMEDOUT;
                }

                /*
                 * Some cards mishandle the status bits,
                 * so make sure to check both the busy
                 * indication and the card state.
                 */
        } while (!(status & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

        return err;
}

static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
                struct request *req, bool *gen_err, u32 *stop_status)
{
        struct mmc_host *host = card->host;
        struct mmc_command cmd = {};
        int err;
        bool use_r1b_resp = rq_data_dir(req) == WRITE;

        /*
         * Normally we use R1B responses for WRITE, but in cases where the host
         * has specified a max_busy_timeout we need to validate it. A failure
         * means we need to prevent the host from doing hw busy detection, which
         * is done by converting to a R1 response instead.
         */
        if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;

        cmd.opcode = MMC_STOP_TRANSMISSION;
        if (use_r1b_resp) {
                cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                cmd.busy_timeout = timeout_ms;
        } else {
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        }

        err = mmc_wait_for_cmd(host, &cmd, 5);
        if (err)
                return err;

        *stop_status = cmd.resp[0];

        /* No need to check card status in case of READ. */
        if (rq_data_dir(req) == READ)
                return 0;

        if (!mmc_host_is_spi(host) &&
                (*stop_status & R1_ERROR)) {
                pr_err("%s: %s: general error sending stop command, resp %#x\n",
                        req->rq_disk->disk_name, __func__, *stop_status);
                *gen_err = true;
        }

        return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM    3
#define ERR_RETRY       2
#define ERR_ABORT       1
#define ERR_CONTINUE    0
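
/*
 * The codes above are ordered by severity. mmc_blk_cmd_recovery()
 * below maps a failed request onto one of them: the request is retried
 * (ERR_RETRY), aborted (ERR_ABORT), failed because the card is gone
 * (ERR_NOMEDIUM), or handed on to the data error handling
 * (ERR_CONTINUE).
 */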

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
        bool status_valid, u32 status)
{
        switch (error) {
        case -EILSEQ:
                /* response crc error, retry the r/w cmd */
                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "response CRC error",
                        name, status);
                return ERR_RETRY;

        case -ETIMEDOUT:
                pr_err("%s: %s sending %s command, card status %#x\n",
                        req->rq_disk->disk_name, "timed out", name, status);

                /* If the status cmd initially failed, retry the r/w cmd */
                if (!status_valid) {
                        pr_err("%s: status not valid, retrying timeout\n",
                                req->rq_disk->disk_name);
                        return ERR_RETRY;
                }

                /*
                 * If it was a r/w cmd crc error, or illegal command
                 * (e.g., issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
                if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
                        pr_err("%s: command error, retrying timeout\n",
                                req->rq_disk->disk_name);
                        return ERR_RETRY;
                }

                /* Otherwise abort the command */
                return ERR_ABORT;

        default:
                /* We don't understand the error code the driver gave us */
                pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
                       req->rq_disk->disk_name, error, status);
                return ERR_ABORT;
        }
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
        struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
        bool prev_cmd_status_valid = true;
        u32 status, stop_status = 0;
        int err, retry;

        if (mmc_card_removed(card))
                return ERR_NOMEDIUM;

        /*
         * Try to get card status which indicates both the card state
         * and why there was no response.  If the first attempt fails,
         * we can't be sure the returned status is for the r/w command.
         */
        for (retry = 2; retry >= 0; retry--) {
                err = __mmc_send_status(card, &status, 0);
                if (!err)
                        break;

                /* Re-tune if needed */
                mmc_retune_recheck(card->host);

                prev_cmd_status_valid = false;
                pr_err("%s: error %d sending status command, %sing\n",
                       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
        }

        /* We couldn't get a response from the card.  Give up. */
        if (err) {
                /* Check if the card is removed */
                if (mmc_detect_card_removed(card->host))
                        return ERR_NOMEDIUM;
                return ERR_ABORT;
        }

        /* Flag ECC errors */
        if ((status & R1_CARD_ECC_FAILED) ||
            (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
            (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
                *ecc_err = true;

        /* Flag General errors */
        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
                if ((status & R1_ERROR) ||
                        (brq->stop.resp[0] & R1_ERROR)) {
                        pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
                               req->rq_disk->disk_name, __func__,
                               brq->stop.resp[0], status);
                        *gen_err = true;
                }

        /*
         * Check the current card state.  If it is in some data transfer
         * mode, tell it to stop (and hopefully transition back to TRAN.)
         */
        if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
            R1_CURRENT_STATE(status) == R1_STATE_RCV) {
                err = send_stop(card,
                        DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
                        req, gen_err, &stop_status);
                if (err) {
                        pr_err("%s: error %d sending stop command\n",
                               req->rq_disk->disk_name, err);
                        /*
                         * If the stop cmd also timed out, the card is probably
                         * not present, so abort. Other errors are bad news too.
                         */
                        return ERR_ABORT;
                }

                if (stop_status & R1_CARD_ECC_FAILED)
                        *ecc_err = true;
        }

        /* Check for set block count errors */
        if (brq->sbc.error)
                return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
                                prev_cmd_status_valid, status);

        /* Check for r/w command errors */
        if (brq->cmd.error)
                return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
                                prev_cmd_status_valid, status);

        /* Data errors */
        if (!brq->stop.error)
                return ERR_CONTINUE;

        /* Now for stop errors.  These aren't fatal to the transfer. */
        pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
               req->rq_disk->disk_name, brq->stop.error,
               brq->cmd.resp[0], status);

        /*
         * Substitute in our own stop status as this will give the error
         * state which happened during the execution of the r/w command.
         */
        if (stop_status) {
                brq->stop.resp[0] = stop_status;
                brq->stop.error = 0;
        }
        return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
                         int type)
{
        int err;

        if (md->reset_done & type)
                return -EEXIST;

        md->reset_done |= type;
        err = mmc_hw_reset(host);
        /* Ensure we switch back to the correct partition */
        if (err != -EOPNOTSUPP) {
                struct mmc_blk_data *main_md =
                        dev_get_drvdata(&host->card->dev);
                int part_err;

                main_md->part_curr = main_md->part_type;
                part_err = mmc_blk_part_switch(host->card, md->part_type);
                if (part_err) {
                        /*
                         * We have failed to get back into the correct
                         * partition, so we need to abort the whole request.
                         */
                        return -ENODEV;
                }
        }
        return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
        md->reset_done &= ~type;
}

/*
 * Non-block commands are queued by the block layer, processed alongside
 * all other requests, and then issued from this function once they
 * reach the head of the queue.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
        struct mmc_queue_req *mq_rq;
        struct mmc_card *card = mq->card;
        struct mmc_blk_data *md = mq->blkdata;
        struct mmc_blk_ioc_data **idata;
        bool rpmb_ioctl;
        u8 **ext_csd;
        u32 status;
        int ret;
        int i;

        mq_rq = req_to_mmc_queue_req(req);
        rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);

        switch (mq_rq->drv_op) {
        case MMC_DRV_OP_IOCTL:
        case MMC_DRV_OP_IOCTL_RPMB:
                idata = mq_rq->drv_op_data;
                for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
                        ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
                        if (ret)
                                break;
                }
                /* Always switch back to main area after RPMB access */
                if (rpmb_ioctl)
                        mmc_blk_part_switch(card, 0);
                break;
        case MMC_DRV_OP_BOOT_WP:
                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
                                 card->ext_csd.boot_ro_lock |
                                 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
                                 card->ext_csd.part_time);
                if (ret)
                        pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
                               md->disk->disk_name, ret);
                else
                        card->ext_csd.boot_ro_lock |=
                                EXT_CSD_BOOT_WP_B_PWR_WP_EN;
                break;
        case MMC_DRV_OP_GET_CARD_STATUS:
                ret = mmc_send_status(card, &status);
                if (!ret)
                        ret = status;
                break;
        case MMC_DRV_OP_GET_EXT_CSD:
                ext_csd = mq_rq->drv_op_data;
                ret = mmc_get_ext_csd(card, ext_csd);
                break;
        default:
                pr_err("%s: unknown driver specific operation\n",
                       md->disk->disk_name);
                ret = -EINVAL;
                break;
        }
        mq_rq->drv_op_result = ret;
        blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_DISCARD;
        blk_status_t status = BLK_STS_OK;

        if (!mmc_can_erase(card)) {
                status = BLK_STS_NOTSUPP;
                goto fail;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_discard(card))
                arg = MMC_DISCARD_ARG;
        else if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;
        do {
                err = 0;
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         arg == MMC_TRIM_ARG ?
                                         INAND_CMD38_ARG_TRIM :
                                         INAND_CMD38_ARG_ERASE,
                                         0);
                }
                if (!err)
                        err = mmc_erase(card, from, nr, arg);
        } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
        if (err)
                status = BLK_STS_IOERR;
        else
                mmc_blk_reset_success(md, type);
fail:
        blk_end_request(req, status, blk_rq_bytes(req));
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_SECDISCARD;
        blk_status_t status = BLK_STS_OK;

        if (!(mmc_can_secure_erase_trim(card))) {
                status = BLK_STS_NOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

retry:
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_SECURE_TRIM1_ARG ?
                                 INAND_CMD38_ARG_SECTRIM1 :
                                 INAND_CMD38_ARG_SECERASE,
                                 0);
                if (err)
                        goto out_retry;
        }

        err = mmc_erase(card, from, nr, arg);
        if (err == -EIO)
                goto out_retry;
        if (err) {
                status = BLK_STS_IOERR;
                goto out;
        }

        if (arg == MMC_SECURE_TRIM1_ARG) {
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         INAND_CMD38_ARG_SECTRIM2,
                                         0);
                        if (err)
                                goto out_retry;
                }

                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
                if (err == -EIO)
                        goto out_retry;
                if (err) {
                        status = BLK_STS_IOERR;
                        goto out;
                }
        }

out_retry:
        if (err && !mmc_blk_reset(md, card->host, type))
                goto retry;
        if (!err)
                mmc_blk_reset_success(md, type);
out:
        blk_end_request(req, status, blk_rq_bytes(req));
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
        int ret = 0;

        ret = mmc_flush_cache(card);
        blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                    struct mmc_card *card,
                                    struct request *req)
{
        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }
}
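
/*
 * Example of the legacy clamping above: with rel_sectors == 16, an
 * aligned 64-block write is trimmed to 16 blocks per transfer (the
 * remainder completes in later partial transfers), while a write
 * starting at an unaligned position falls back to single-block
 * reliable writes.
 */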

#define CMD_ERRORS                                                      \
        (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
         R1_ADDRESS_ERROR |     /* Misaligned address */                \
         R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
         R1_WP_VIOLATION |      /* Tried to write to protected block */ \
         R1_CARD_ECC_FAILED |   /* Card ECC failed */                   \
         R1_CC_ERROR |          /* Card controller error */             \
         R1_ERROR)              /* General/unknown error */
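
/*
 * Example: an R1 status of 0x80000900 has R1_OUT_OF_RANGE set while
 * the card sits ready-for-data in the TRAN state - the benign
 * "open-ended read ran past the last block" case that
 * mmc_blk_eval_resp_error() below deliberately tolerates.
 */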

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
        u32 val;

        /*
         * Per the SD specification (physical layer version 4.10) [1],
         * section 4.3.3, it explicitly states that "When the last
         * block of user area is read using CMD18, the host should
         * ignore OUT_OF_RANGE error that may occur even the sequence
         * is correct". And JESD84-B51 for eMMC also has a similar
         * statement on section 6.8.3.
         *
         * Multiple block read/write could be done by either predefined
         * method, namely CMD23, or open-ending mode. For open-ending mode,
         * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
         *
         * However the spec [1] doesn't tell us whether we should also
         * ignore that for predefined method. But per the spec [1], section
         * 4.15 Set Block Count Command, it says "If illegal block count
         * is set, out of range error will be indicated during read/write
         * operation (For example, data transfer is stopped at user area
         * boundary)." In other words, we could expect an out of range error
         * in the response for the following CMD18/25. And if the argument of
         * CMD23 + the argument of CMD18/25 exceed the max number of blocks,
         * we could also expect to get a -ETIMEDOUT or any error number from
         * the host drivers due to a missing data response (for write) or
         * missing data (for read), as the card will stop the data transfer
         * by itself per the spec. So we only need to check R1_OUT_OF_RANGE
         * for open-ending mode.
         */

        if (!brq->stop.error) {
                bool oor_with_open_end;
                /* If there is no error yet, check R1 response */

                val = brq->stop.resp[0] & CMD_ERRORS;
                oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

                if (val && !oor_with_open_end)
                        brq->stop.error = -EIO;
        }
}

static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
                                             struct mmc_async_req *areq)
{
        struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
                                                    areq);
        struct mmc_blk_request *brq = &mq_mrq->brq;
        struct request *req = mmc_queue_req_to_req(mq_mrq);
        int need_retune = card->host->need_retune;
        bool ecc_err = false;
        bool gen_err = false;

        /*
         * sbc.error indicates a problem with the set block count
         * command.  No data will have been transferred.
         *
         * cmd.error indicates a problem with the r/w command.  No
         * data will have been transferred.
         *
         * stop.error indicates a problem with the stop command.  Data
         * may have been transferred, or may still be transferring.
         */

        mmc_blk_eval_resp_error(brq);

        if (brq->sbc.error || brq->cmd.error ||
            brq->stop.error || brq->data.error) {
                switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
                case ERR_RETRY:
                        return MMC_BLK_RETRY;
                case ERR_ABORT:
                        return MMC_BLK_ABORT;
                case ERR_NOMEDIUM:
                        return MMC_BLK_NOMEDIUM;
                case ERR_CONTINUE:
                        break;
                }
        }

        /*
         * Check for errors relating to the execution of the
         * initial command - such as address errors.  No data
         * has been transferred.
         */
        if (brq->cmd.resp[0] & CMD_ERRORS) {
                pr_err("%s: r/w command failed, status = %#x\n",
                       req->rq_disk->disk_name, brq->cmd.resp[0]);
                return MMC_BLK_ABORT;
        }

1516         /*
1517          * Everything else is either success, or a data error of some
1518          * kind.  If it was a write, we may have transitioned to
1519          * program mode, which we have to wait for to complete.
1520          */
1521         if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1522                 int err;
1523
1524                 /* Check stop command response */
1525                 if (brq->stop.resp[0] & R1_ERROR) {
1526                         pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1527                                req->rq_disk->disk_name, __func__,
1528                                brq->stop.resp[0]);
1529                         gen_err = true;
1530                 }
1531
1532                 err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1533                                         &gen_err);
1534                 if (err)
1535                         return MMC_BLK_CMD_ERR;
1536         }
1537
1538         /* If a general error occurred, retry the write operation. */
1539         if (gen_err) {
1540                 pr_warn("%s: retrying write for general error\n",
1541                                 req->rq_disk->disk_name);
1542                 return MMC_BLK_RETRY;
1543         }
1544
1545         /* Some errors (ECC) are flagged on the next command, so check stop, too */
1546         if (brq->data.error || brq->stop.error) {
1547                 if (need_retune && !brq->retune_retry_done) {
1548                         pr_debug("%s: retrying because a re-tune was needed\n",
1549                                  req->rq_disk->disk_name);
1550                         brq->retune_retry_done = 1;
1551                         return MMC_BLK_RETRY;
1552                 }
1553                 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1554                        req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
1555                        (unsigned)blk_rq_pos(req),
1556                        (unsigned)blk_rq_sectors(req),
1557                        brq->cmd.resp[0], brq->stop.resp[0]);
1558
1559                 if (rq_data_dir(req) == READ) {
1560                         if (ecc_err)
1561                                 return MMC_BLK_ECC_ERR;
1562                         return MMC_BLK_DATA_ERR;
1563                 } else {
1564                         return MMC_BLK_CMD_ERR;
1565                 }
1566         }
1567
1568         if (!brq->data.bytes_xfered)
1569                 return MMC_BLK_RETRY;
1570
1571         if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1572                 return MMC_BLK_PARTIAL;
1573
1574         return MMC_BLK_SUCCESS;
1575 }
1576
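     /*
      * Set up the data portion of the request: block size, count and
      * address, the stop command, the reliable write and data tag flags,
      * the data timeout and the scatterlist mapping.
      */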
1577 static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
1578                               int disable_multi, bool *do_rel_wr_p,
1579                               bool *do_data_tag_p)
1580 {
1581         struct mmc_blk_data *md = mq->blkdata;
1582         struct mmc_card *card = md->queue.card;
1583         struct mmc_blk_request *brq = &mqrq->brq;
1584         struct request *req = mmc_queue_req_to_req(mqrq);
1585         bool do_rel_wr, do_data_tag;
1586
1587         /*
1588          * Reliable writes are used to implement Forced Unit Access and
1589          * are supported only on MMCs.
1590          */
1591         do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1592                     rq_data_dir(req) == WRITE &&
1593                     (md->flags & MMC_BLK_REL_WR);
1594
1595         memset(brq, 0, sizeof(struct mmc_blk_request));
1596
1597         brq->mrq.data = &brq->data;
1598         brq->mrq.tag = req->tag;
1599
1600         brq->stop.opcode = MMC_STOP_TRANSMISSION;
1601         brq->stop.arg = 0;
1602
1603         if (rq_data_dir(req) == READ) {
1604                 brq->data.flags = MMC_DATA_READ;
1605                 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1606         } else {
1607                 brq->data.flags = MMC_DATA_WRITE;
1608                 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1609         }
1610
1611         brq->data.blksz = 512;
1612         brq->data.blocks = blk_rq_sectors(req);
1613         brq->data.blk_addr = blk_rq_pos(req);
1614
1615         /*
1616          * The command queue supports 2 priorities: "high" (1) and "simple" (0).
1617          * The eMMC will give "high" priority tasks priority over "simple"
1618          * priority tasks. Here we always set "simple" priority by not setting
1619          * MMC_DATA_PRIO.
1620          */
1621
1622         /*
1623          * The block layer doesn't support all sector count
1624          * restrictions, so we need to be prepared for too big
1625          * requests.
1626          */
1627         if (brq->data.blocks > card->host->max_blk_count)
1628                 brq->data.blocks = card->host->max_blk_count;
1629
1630         if (brq->data.blocks > 1) {
1631                 /*
1632                  * After a read error, we redo the request one sector
1633                  * at a time in order to accurately determine which
1634                  * sectors can be read successfully.
1635                  */
1636                 if (disable_multi)
1637                         brq->data.blocks = 1;
1638
1639                 /*
1640                  * Some controllers have HW issues while operating
1641                  * in multiple I/O mode
1642                  */
1643                 if (card->host->ops->multi_io_quirk)
1644                         brq->data.blocks = card->host->ops->multi_io_quirk(card,
1645                                                 (rq_data_dir(req) == READ) ?
1646                                                 MMC_DATA_READ : MMC_DATA_WRITE,
1647                                                 brq->data.blocks);
1648         }
1649
1650         if (do_rel_wr) {
1651                 mmc_apply_rel_rw(brq, card, req);
1652                 brq->data.flags |= MMC_DATA_REL_WR;
1653         }
1654
1655         /*
1656          * The data tag is used only when writing meta data, to speed
1657          * up the write and any subsequent reads of this meta data.
1658          */
1659         do_data_tag = card->ext_csd.data_tag_unit_size &&
1660                       (req->cmd_flags & REQ_META) &&
1661                       (rq_data_dir(req) == WRITE) &&
1662                       ((brq->data.blocks * brq->data.blksz) >=
1663                        card->ext_csd.data_tag_unit_size);
1664
1665         if (do_data_tag)
1666                 brq->data.flags |= MMC_DATA_DAT_TAG;
1667
1668         mmc_set_data_timeout(&brq->data, card);
1669
1670         brq->data.sg = mqrq->sg;
1671         brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1672
1673         /*
1674          * Adjust the sg list so it is the same size as the
1675          * request.
1676          */
1677         if (brq->data.blocks != blk_rq_sectors(req)) {
1678                 int i, data_size = brq->data.blocks << 9;
1679                 struct scatterlist *sg;
1680
1681                 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1682                         data_size -= sg->length;
1683                         if (data_size <= 0) {
1684                                 sg->length += data_size;
1685                                 i++;
1686                                 break;
1687                         }
1688                 }
1689                 brq->data.sg_len = i;
1690         }
1691
1692         mqrq->areq.mrq = &brq->mrq;
1693
1694         if (do_rel_wr_p)
1695                 *do_rel_wr_p = do_rel_wr;
1696
1697         if (do_data_tag_p)
1698                 *do_data_tag_p = do_data_tag;
1699 }
1700
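     /*
      * Prepare a complete read/write request: mmc_blk_data_prep() fills
      * in the data, then the opcode (single vs. multiple block), the
      * optional CMD23 pre-command and the error check hook are set up.
      */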
1701 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1702                                struct mmc_card *card,
1703                                int disable_multi,
1704                                struct mmc_queue *mq)
1705 {
1706         u32 readcmd, writecmd;
1707         struct mmc_blk_request *brq = &mqrq->brq;
1708         struct request *req = mmc_queue_req_to_req(mqrq);
1709         struct mmc_blk_data *md = mq->blkdata;
1710         bool do_rel_wr, do_data_tag;
1711
1712         mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
1713
1714         brq->mrq.cmd = &brq->cmd;
1715
1716         brq->cmd.arg = blk_rq_pos(req);
1717         if (!mmc_card_blockaddr(card))
1718                 brq->cmd.arg <<= 9;
1719         brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1720
1721         if (brq->data.blocks > 1 || do_rel_wr) {
1722                 /* SPI multiblock writes terminate using a special
1723                  * token, not a STOP_TRANSMISSION request.
1724                  */
1725                 if (!mmc_host_is_spi(card->host) ||
1726                     rq_data_dir(req) == READ)
1727                         brq->mrq.stop = &brq->stop;
1728                 readcmd = MMC_READ_MULTIPLE_BLOCK;
1729                 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1730         } else {
1731                 brq->mrq.stop = NULL;
1732                 readcmd = MMC_READ_SINGLE_BLOCK;
1733                 writecmd = MMC_WRITE_BLOCK;
1734         }
1735         brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
1736
1737         /*
1738          * Pre-defined multi-block transfers are preferable to
1739          * open-ended ones (and necessary for reliable writes).
1740          * However, it is not sufficient to just send CMD23,
1741          * and avoid the final CMD12, as on an error condition
1742          * CMD12 (stop) needs to be sent anyway. This, coupled
1743          * with Auto-CMD23 enhancements provided by some
1744          * hosts, means that the complexity of dealing
1745          * with this is best left to the host. If CMD23 is
1746          * supported by card and host, we'll fill sbc in and let
1747          * the host deal with handling it correctly. This means
1748          * that for hosts that don't expose MMC_CAP_CMD23, no
1749          * change of behavior will be observed.
1750          *
1751          * N.B.: Some MMC cards experience performance degradation.
1752          * We'll avoid using CMD23-bounded multiblock writes for
1753          * these, while retaining features like reliable writes.
1754          */
1755         if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1756             (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1757              do_data_tag)) {
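                     /*
                      * CMD23 argument layout: bits [15:0] carry the block
                      * count, bit 31 requests a reliable write and bit 29
                      * tags the data, matching the flags computed above.
                      */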
1758                 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1759                 brq->sbc.arg = brq->data.blocks |
1760                         (do_rel_wr ? (1 << 31) : 0) |
1761                         (do_data_tag ? (1 << 29) : 0);
1762                 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1763                 brq->mrq.sbc = &brq->sbc;
1764         }
1765
1766         mqrq->areq.err_check = mmc_blk_err_check;
1767 }
1768
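     /*
      * After a command error, complete as many sectors as are known to
      * have been written. Returns true if part of the request is still
      * pending completion.
      */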
1769 static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1770                                struct mmc_blk_request *brq, struct request *req,
1771                                bool old_req_pending)
1772 {
1773         bool req_pending;
1774
1775         /*
1776          * If this is an SD card and we're writing, we can first
1777          * mark the known good sectors as ok.
1778          *
1779          * If the card is not SD, we can still acknowledge written
1780          * sectors as reported by the controller (which might be less
1781          * than the real number of written sectors, but never more).
1782          */
1783         if (mmc_card_sd(card)) {
1784                 u32 blocks;
1785                 int err;
1786
1787                 err = mmc_sd_num_wr_blocks(card, &blocks);
1788                 if (err)
1789                         req_pending = old_req_pending;
1790                 else
1791                         req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
1792         } else {
1793                 req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
1794         }
1795         return req_pending;
1796 }
1797
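     /*
      * Fail the remainder of the request and drop it from the queue
      * count. RQF_QUIET suppresses error reporting when the card has
      * been removed.
      */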
1798 static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
1799                                  struct request *req,
1800                                  struct mmc_queue_req *mqrq)
1801 {
1802         if (mmc_card_removed(card))
1803                 req->rq_flags |= RQF_QUIET;
1804         while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
1805         mq->qcnt--;
1806 }
1807
1808 /**
1809  * mmc_blk_rw_try_restart() - tries to restart the current async request
1810  * @mq: the queue with the card and host to restart
1811  * @req: a new request that wants to be started after the current one
      * @mqrq: the queue request holding the areq to prepare and restart
1812  */
1813 static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
1814                                    struct mmc_queue_req *mqrq)
1815 {
1816         if (!req)
1817                 return;
1818
1819         /*
1820          * If the card was removed, just cancel everything and return.
1821          */
1822         if (mmc_card_removed(mq->card)) {
1823                 req->rq_flags |= RQF_QUIET;
1824                 blk_end_request_all(req, BLK_STS_IOERR);
1825                 mq->qcnt--; /* FIXME: just set to 0? */
1826                 return;
1827         }
1828         /* Else proceed and try to restart the current async request */
1829         mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
1830         mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
1831 }
1832
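     /*
      * Issue a read/write request and handle the result of the previous
      * one: mmc_start_areq() starts new_req and returns the completed
      * request, whose status drives the retry/abort logic below. A NULL
      * new_req just flushes the pipeline.
      */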
1833 static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
1834 {
1835         struct mmc_blk_data *md = mq->blkdata;
1836         struct mmc_card *card = md->queue.card;
1837         struct mmc_blk_request *brq;
1838         int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
1839         enum mmc_blk_status status;
1840         struct mmc_queue_req *mqrq_cur = NULL;
1841         struct mmc_queue_req *mq_rq;
1842         struct request *old_req;
1843         struct mmc_async_req *new_areq;
1844         struct mmc_async_req *old_areq;
1845         bool req_pending = true;
1846
1847         if (new_req) {
1848                 mqrq_cur = req_to_mmc_queue_req(new_req);
1849                 mq->qcnt++;
1850         }
1851
1852         if (!mq->qcnt)
1853                 return;
1854
1855         do {
1856                 if (new_req) {
1857                         /*
1858                          * When 4KB native sector is enabled, only reads and
1859                          * writes of a multiple of 8 blocks are allowed.
1860                          */
1861                         if (mmc_large_sector(card) &&
1862                                 !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
1863                                 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1864                                         new_req->rq_disk->disk_name);
1865                                 mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
1866                                 return;
1867                         }
1868
1869                         mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
1870                         new_areq = &mqrq_cur->areq;
1871                 } else
1872                         new_areq = NULL;
1873
1874                 old_areq = mmc_start_areq(card->host, new_areq, &status);
1875                 if (!old_areq) {
1876                         /*
1877                          * We have just put the first request into the pipeline
1878                          * and there is nothing more to do until it is
1879                          * complete.
1880                          */
1881                         return;
1882                 }
1883
1884                 /*
1885                  * An asynchronous request has been completed and we proceed
1886                  * to handle the result of it.
1887                  */
1888                 mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
1889                 brq = &mq_rq->brq;
1890                 old_req = mmc_queue_req_to_req(mq_rq);
1891                 type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1892
1893                 switch (status) {
1894                 case MMC_BLK_SUCCESS:
1895                 case MMC_BLK_PARTIAL:
1896                         /*
1897                          * A block was successfully transferred.
1898                          */
1899                         mmc_blk_reset_success(md, type);
1900
1901                         req_pending = blk_end_request(old_req, BLK_STS_OK,
1902                                                       brq->data.bytes_xfered);
1903                         /*
1904                          * If the blk_end_request function returns non-zero even
1905                          * though all data has been transferred and no errors
1906                          * were returned by the host controller, it's a bug.
1907                          */
1908                         if (status == MMC_BLK_SUCCESS && req_pending) {
1909                                 pr_err("%s BUG rq_tot %d d_xfer %d\n",
1910                                        __func__, blk_rq_bytes(old_req),
1911                                        brq->data.bytes_xfered);
1912                                 mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1913                                 return;
1914                         }
1915                         break;
1916                 case MMC_BLK_CMD_ERR:
1917                         req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
1918                         if (mmc_blk_reset(md, card->host, type)) {
1919                                 if (req_pending)
1920                                         mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1921                                 else
1922                                         mq->qcnt--;
1923                                 mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1924                                 return;
1925                         }
1926                         if (!req_pending) {
1927                                 mq->qcnt--;
1928                                 mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1929                                 return;
1930                         }
1931                         break;
1932                 case MMC_BLK_RETRY:
1933                         retune_retry_done = brq->retune_retry_done;
1934                         if (retry++ < 5)
1935                                 break;
1936                         /* Fall through */
1937                 case MMC_BLK_ABORT:
1938                         if (!mmc_blk_reset(md, card->host, type))
1939                                 break;
1940                         mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1941                         mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1942                         return;
1943                 case MMC_BLK_DATA_ERR: {
1944                         int err;
1945
1946                         err = mmc_blk_reset(md, card->host, type);
1947                         if (!err)
1948                                 break;
1949                         if (err == -ENODEV) {
1950                                 mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1951                                 mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1952                                 return;
1953                         }
1954                         /* Fall through */
1955                 }
1956                 case MMC_BLK_ECC_ERR:
1957                         if (brq->data.blocks > 1) {
1958                                 /* Redo read one sector at a time */
1959                                 pr_warn("%s: retrying using single block read\n",
1960                                         old_req->rq_disk->disk_name);
1961                                 disable_multi = 1;
1962                                 break;
1963                         }
1964                         /*
1965                          * After an error, we redo I/O one sector at a
1966                          * time, so we only reach here after trying to
1967                          * read a single sector.
1968                          */
1969                         req_pending = blk_end_request(old_req, BLK_STS_IOERR,
1970                                                       brq->data.blksz);
1971                         if (!req_pending) {
1972                                 mq->qcnt--;
1973                                 mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1974                                 return;
1975                         }
1976                         break;
1977                 case MMC_BLK_NOMEDIUM:
1978                         mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1979                         mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1980                         return;
1981                 default:
1982                 pr_err("%s: Unhandled return value (%d)\n",
1983                                 old_req->rq_disk->disk_name, status);
1984                         mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
1985                         mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
1986                         return;
1987                 }
1988
1989                 if (req_pending) {
1990                         /*
1991                          * In case of an incomplete request,
1992                          * prepare it again and resend.
1993                          */
1994                         mmc_blk_rw_rq_prep(mq_rq, card,
1995                                         disable_multi, mq);
1996                         mmc_start_areq(card->host,
1997                                         &mq_rq->areq, NULL);
1998                         mq_rq->brq.retune_retry_done = retune_retry_done;
1999                 }
2000         } while (req_pending);
2001
2002         mq->qcnt--;
2003 }
2004
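     /*
      * Main dispatch for queued requests: claim the host for the first
      * request, switch to the right partition, then issue by type.
      * Driver ops, discard, secure erase and flush are synchronous, so
      * any ongoing async transfer is completed first.
      */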
2005 void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2006 {
2007         int ret;
2008         struct mmc_blk_data *md = mq->blkdata;
2009         struct mmc_card *card = md->queue.card;
2010
2011         if (req && !mq->qcnt)
2012                 /* claim host only for the first request */
2013                 mmc_get_card(card, NULL);
2014
2015         ret = mmc_blk_part_switch(card, md->part_type);
2016         if (ret) {
2017                 if (req) {
2018                         blk_end_request_all(req, BLK_STS_IOERR);
2019                 }
2020                 goto out;
2021         }
2022
2023         if (req) {
2024                 switch (req_op(req)) {
2025                 case REQ_OP_DRV_IN:
2026                 case REQ_OP_DRV_OUT:
2027                         /*
2028                          * Complete ongoing async transfer before issuing
2029                          * ioctl()s
2030                          */
2031                         if (mq->qcnt)
2032                                 mmc_blk_issue_rw_rq(mq, NULL);
2033                         mmc_blk_issue_drv_op(mq, req);
2034                         break;
2035                 case REQ_OP_DISCARD:
2036                         /*
2037                          * Complete ongoing async transfer before issuing
2038                          * discard.
2039                          */
2040                         if (mq->qcnt)
2041                                 mmc_blk_issue_rw_rq(mq, NULL);
2042                         mmc_blk_issue_discard_rq(mq, req);
2043                         break;
2044                 case REQ_OP_SECURE_ERASE:
2045                         /*
2046                          * Complete ongoing async transfer before issuing
2047                          * secure erase.
2048                          */
2049                         if (mq->qcnt)
2050                                 mmc_blk_issue_rw_rq(mq, NULL);
2051                         mmc_blk_issue_secdiscard_rq(mq, req);
2052                         break;
2053                 case REQ_OP_FLUSH:
2054                         /*
2055                          * Complete ongoing async transfer before issuing
2056                          * flush.
2057                          */
2058                         if (mq->qcnt)
2059                                 mmc_blk_issue_rw_rq(mq, NULL);
2060                         mmc_blk_issue_flush(mq, req);
2061                         break;
2062                 default:
2063                         /* Normal request, just issue it */
2064                         mmc_blk_issue_rw_rq(mq, req);
2065                         card->host->context_info.is_waiting_last_req = false;
2066                         break;
2067                 }
2068         } else {
2069                 /* No request, flushing the pipeline with NULL */
2070                 mmc_blk_issue_rw_rq(mq, NULL);
2071                 card->host->context_info.is_waiting_last_req = false;
2072         }
2073
2074 out:
2075         if (!mq->qcnt)
2076                 mmc_put_card(card, NULL);
2077 }
2078
2079 static inline int mmc_blk_readonly(struct mmc_card *card)
2080 {
2081         return mmc_card_readonly(card) ||
2082                !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2083 }
2084
2085 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2086                                               struct device *parent,
2087                                               sector_t size,
2088                                               bool default_ro,
2089                                               const char *subname,
2090                                               int area_type)
2091 {
2092         struct mmc_blk_data *md;
2093         int devidx, ret;
2094
2095         devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
2096         if (devidx < 0) {
2097                 /*
2098                  * We get -ENOSPC because there is no more devidx
2099                  * available. The reason may be that either userspace
2100                  * hasn't yet unmounted the partitions, which postpones
2101                  * mmc_blk_release() from being called, or the device has
2102                  * more partitions than we support.
2103                  */
2104                 if (devidx == -ENOSPC)
2105                         dev_err(mmc_dev(card->host),
2106                                 "no more device IDs available\n");
2107
2108                 return ERR_PTR(devidx);
2109         }
2110
2111         md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2112         if (!md) {
2113                 ret = -ENOMEM;
2114                 goto out;
2115         }
2116
2117         md->area_type = area_type;
2118
2119         /*
2120          * Set the read-only status based on the supported commands
2121          * and the write protect switch.
2122          */
2123         md->read_only = mmc_blk_readonly(card);
2124
2125         md->disk = alloc_disk(perdev_minors);
2126         if (md->disk == NULL) {
2127                 ret = -ENOMEM;
2128                 goto err_kfree;
2129         }
2130
2131         spin_lock_init(&md->lock);
2132         INIT_LIST_HEAD(&md->part);
2133         INIT_LIST_HEAD(&md->rpmbs);
2134         md->usage = 1;
2135
2136         ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2137         if (ret)
2138                 goto err_putdisk;
2139
2140         md->queue.blkdata = md;
2141
2142         md->disk->major = MMC_BLOCK_MAJOR;
2143         md->disk->first_minor = devidx * perdev_minors;
2144         md->disk->fops = &mmc_bdops;
2145         md->disk->private_data = md;
2146         md->disk->queue = md->queue.queue;
2147         md->parent = parent;
2148         set_disk_ro(md->disk, md->read_only || default_ro);
2149         md->disk->flags = GENHD_FL_EXT_DEVT;
2150         if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2151                 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2152
2153         /*
2154          * As discussed on lkml, GENHD_FL_REMOVABLE should:
2155          *
2156          * - be set for removable media with permanent block devices
2157          * - be unset for removable block devices with permanent media
2158          *
2159          * Since MMC block devices clearly fall under the second
2160          * case, we do not set GENHD_FL_REMOVABLE.  Userspace
2161          * should use the block device creation/destruction hotplug
2162          * messages to tell when the card is present.
2163          */
2164
2165         snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2166                  "mmcblk%u%s", card->host->index, subname ? subname : "");
2167
2168         if (mmc_card_mmc(card))
2169                 blk_queue_logical_block_size(md->queue.queue,
2170                                              card->ext_csd.data_sector_size);
2171         else
2172                 blk_queue_logical_block_size(md->queue.queue, 512);
2173
2174         set_capacity(md->disk, size);
2175
2176         if (mmc_host_cmd23(card->host)) {
2177                 if ((mmc_card_mmc(card) &&
2178                      card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
2179                     (mmc_card_sd(card) &&
2180                      card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2181                         md->flags |= MMC_BLK_CMD23;
2182         }
2183
2184         if (mmc_card_mmc(card) &&
2185             md->flags & MMC_BLK_CMD23 &&
2186             ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2187              card->ext_csd.rel_sectors)) {
2188                 md->flags |= MMC_BLK_REL_WR;
2189                 blk_queue_write_cache(md->queue.queue, true, true);
2190         }
2191
2192         return md;
2193
2194  err_putdisk:
2195         put_disk(md->disk);
2196  err_kfree:
2197         kfree(md);
2198  out:
2199         ida_simple_remove(&mmc_blk_ida, devidx);
2200         return ERR_PTR(ret);
2201 }
2202
2203 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2204 {
2205         sector_t size;
2206
2207         if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2208                 /*
2209                  * The EXT_CSD sector count is in number of 512 byte
2210                  * sectors.
2211                  */
2212                 size = card->ext_csd.sectors;
2213         } else {
2214                 /*
2215                  * The CSD capacity field is in units of read_blkbits.
2216                  * set_capacity takes units of 512 bytes.
2217                  */
2218                 size = (typeof(sector_t))card->csd.capacity
2219                         << (card->csd.read_blkbits - 9);
2220         }
2221
2222         return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2223                                         MMC_BLK_DATA_AREA_MAIN);
2224 }
2225
2226 static int mmc_blk_alloc_part(struct mmc_card *card,
2227                               struct mmc_blk_data *md,
2228                               unsigned int part_type,
2229                               sector_t size,
2230                               bool default_ro,
2231                               const char *subname,
2232                               int area_type)
2233 {
2234         char cap_str[10];
2235         struct mmc_blk_data *part_md;
2236
2237         part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2238                                     subname, area_type);
2239         if (IS_ERR(part_md))
2240                 return PTR_ERR(part_md);
2241         part_md->part_type = part_type;
2242         list_add(&part_md->part, &md->part);
2243
2244         string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
2245                         cap_str, sizeof(cap_str));
2246         pr_info("%s: %s %s partition %u %s\n",
2247                part_md->disk->disk_name, mmc_card_id(card),
2248                mmc_card_name(card), part_md->part_type, cap_str);
2249         return 0;
2250 }
2251
2252 /**
2253  * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
2254  * @filp: the character device file
2255  * @cmd: the ioctl() command
2256  * @arg: the argument from userspace
2257  *
2258  * This will essentially just redirect the ioctl()s coming in over to
2259  * the main block device spawning the RPMB character device.
2260  */
2261 static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
2262                            unsigned long arg)
2263 {
2264         struct mmc_rpmb_data *rpmb = filp->private_data;
2265         int ret;
2266
2267         switch (cmd) {
2268         case MMC_IOC_CMD:
2269                 ret = mmc_blk_ioctl_cmd(rpmb->md,
2270                                         (struct mmc_ioc_cmd __user *)arg,
2271                                         rpmb);
2272                 break;
2273         case MMC_IOC_MULTI_CMD:
2274                 ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
2275                                         (struct mmc_ioc_multi_cmd __user *)arg,
2276                                         rpmb);
2277                 break;
2278         default:
2279                 ret = -EINVAL;
2280                 break;
2281         }
2282
2283         return ret;
2284 }
2285
2286 #ifdef CONFIG_COMPAT
2287 static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
2288                               unsigned long arg)
2289 {
2290         return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2291 }
2292 #endif
2293
2294 static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
2295 {
2296         struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
2297                                                   struct mmc_rpmb_data, chrdev);
2298
2299         get_device(&rpmb->dev);
2300         filp->private_data = rpmb;
2301         mmc_blk_get(rpmb->md->disk);
2302
2303         return nonseekable_open(inode, filp);
2304 }
2305
2306 static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
2307 {
2308         struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
2309                                                   struct mmc_rpmb_data, chrdev);
2310
2311         put_device(&rpmb->dev);
2312         mmc_blk_put(rpmb->md);
2313
2314         return 0;
2315 }
2316
2317 static const struct file_operations mmc_rpmb_fileops = {
2318         .release = mmc_rpmb_chrdev_release,
2319         .open = mmc_rpmb_chrdev_open,
2320         .owner = THIS_MODULE,
2321         .llseek = no_llseek,
2322         .unlocked_ioctl = mmc_rpmb_ioctl,
2323 #ifdef CONFIG_COMPAT
2324         .compat_ioctl = mmc_rpmb_ioctl_compat,
2325 #endif
2326 };
2327
2328 static void mmc_blk_rpmb_device_release(struct device *dev)
2329 {
2330         struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
2331
2332         ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
2333         kfree(rpmb);
2334 }
2335
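     /*
      * RPMB areas are exposed as character devices on the RPMB bus type
      * rather than as block devices; the chardev forwards ioctl()s to
      * the main block device (see mmc_rpmb_ioctl()).
      */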
2336 static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
2337                                    struct mmc_blk_data *md,
2338                                    unsigned int part_index,
2339                                    sector_t size,
2340                                    const char *subname)
2341 {
2342         int devidx, ret;
2343         char rpmb_name[DISK_NAME_LEN];
2344         char cap_str[10];
2345         struct mmc_rpmb_data *rpmb;
2346
2347         /* This creates the minor number for the RPMB char device */
2348         devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
2349         if (devidx < 0)
2350                 return devidx;
2351
2352         rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
2353         if (!rpmb) {
2354                 ida_simple_remove(&mmc_rpmb_ida, devidx);
2355                 return -ENOMEM;
2356         }
2357
2358         snprintf(rpmb_name, sizeof(rpmb_name),
2359                  "mmcblk%u%s", card->host->index, subname ? subname : "");
2360
2361         rpmb->id = devidx;
2362         rpmb->part_index = part_index;
2363         rpmb->dev.init_name = rpmb_name;
2364         rpmb->dev.bus = &mmc_rpmb_bus_type;
2365         rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
2366         rpmb->dev.parent = &card->dev;
2367         rpmb->dev.release = mmc_blk_rpmb_device_release;
2368         device_initialize(&rpmb->dev);
2369         dev_set_drvdata(&rpmb->dev, rpmb);
2370         rpmb->md = md;
2371
2372         cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
2373         rpmb->chrdev.owner = THIS_MODULE;
2374         ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
2375         if (ret) {
2376                 pr_err("%s: could not add character device\n", rpmb_name);
2377                 goto out_put_device;
2378         }
2379
2380         list_add(&rpmb->node, &md->rpmbs);
2381
2382         string_get_size((u64)size, 512, STRING_UNITS_2,
2383                         cap_str, sizeof(cap_str));
2384
2385         pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n",
2386                 rpmb_name, mmc_card_id(card),
2387                 mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str,
2388                 MAJOR(mmc_rpmb_devt), rpmb->id);
2389
2390         return 0;
2391
2392 out_put_device:
2393         put_device(&rpmb->dev);
2394         return ret;
2395 }
2396
2397 static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
2398 {
2400         cdev_device_del(&rpmb->chrdev, &rpmb->dev);
2401         put_device(&rpmb->dev);
2402 }
2403
2404 /* MMC Physical partitions consist of two boot partitions and
2405  * up to four general purpose partitions.
2406  * For each partition enabled in EXT_CSD a block device will be allocated
2407  * to provide access to the partition.
2408  */
2409
2410 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2411 {
2412         int idx, ret;
2413
2414         if (!mmc_card_mmc(card))
2415                 return 0;
2416
2417         for (idx = 0; idx < card->nr_parts; idx++) {
2418                 if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
2419                         /*
2420                          * RPMB partitions do not provide block access; they
2421                          * are only accessed using ioctl()s. Thus create
2422                          * special RPMB block devices that do not have a
2423                          * backing block queue for these.
2424                          */
2425                         ret = mmc_blk_alloc_rpmb_part(card, md,
2426                                 card->part[idx].part_cfg,
2427                                 card->part[idx].size >> 9,
2428                                 card->part[idx].name);
2429                         if (ret)
2430                                 return ret;
2431                 } else if (card->part[idx].size) {
2432                         ret = mmc_blk_alloc_part(card, md,
2433                                 card->part[idx].part_cfg,
2434                                 card->part[idx].size >> 9,
2435                                 card->part[idx].force_ro,
2436                                 card->part[idx].name,
2437                                 card->part[idx].area_type);
2438                         if (ret)
2439                                 return ret;
2440                 }
2441         }
2442
2443         return 0;
2444 }
2445
2446 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2447 {
2448         struct mmc_card *card;
2449
2450         if (md) {
2451                 /*
2452                  * Flush remaining requests and free queues. It
2453                  * is freeing the queue that stops new requests
2454                  * from being accepted.
2455                  */
2456                 card = md->queue.card;
2457                 spin_lock_irq(md->queue.queue->queue_lock);
2458                 queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
2459                 spin_unlock_irq(md->queue.queue->queue_lock);
2460                 blk_set_queue_dying(md->queue.queue);
2461                 mmc_cleanup_queue(&md->queue);
2462                 if (md->disk->flags & GENHD_FL_UP) {
2463                         device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2464                         if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2465                                         card->ext_csd.boot_ro_lockable)
2466                                 device_remove_file(disk_to_dev(md->disk),
2467                                         &md->power_ro_lock);
2468
2469                         del_gendisk(md->disk);
2470                 }
2471                 mmc_blk_put(md);
2472         }
2473 }
2474
2475 static void mmc_blk_remove_parts(struct mmc_card *card,
2476                                  struct mmc_blk_data *md)
2477 {
2478         struct list_head *pos, *q;
2479         struct mmc_blk_data *part_md;
2480         struct mmc_rpmb_data *rpmb;
2481
2482         /* Remove RPMB partitions */
2483         list_for_each_safe(pos, q, &md->rpmbs) {
2484                 rpmb = list_entry(pos, struct mmc_rpmb_data, node);
2485                 list_del(pos);
2486                 mmc_blk_remove_rpmb_part(rpmb);
2487         }
2488         /* Remove block partitions */
2489         list_for_each_safe(pos, q, &md->part) {
2490                 part_md = list_entry(pos, struct mmc_blk_data, part);
2491                 list_del(pos);
2492                 mmc_blk_remove_req(part_md);
2493         }
2494 }
2495
2496 static int mmc_add_disk(struct mmc_blk_data *md)
2497 {
2498         int ret;
2499         struct mmc_card *card = md->queue.card;
2500
2501         device_add_disk(md->parent, md->disk);
2502         md->force_ro.show = force_ro_show;
2503         md->force_ro.store = force_ro_store;
2504         sysfs_attr_init(&md->force_ro.attr);
2505         md->force_ro.attr.name = "force_ro";
2506         md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2507         ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2508         if (ret)
2509                 goto force_ro_fail;
2510
2511         if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2512              card->ext_csd.boot_ro_lockable) {
2513                 umode_t mode;
2514
2515                 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2516                         mode = S_IRUGO;
2517                 else
2518                         mode = S_IRUGO | S_IWUSR;
2519
2520                 md->power_ro_lock.show = power_ro_lock_show;
2521                 md->power_ro_lock.store = power_ro_lock_store;
2522                 sysfs_attr_init(&md->power_ro_lock.attr);
2523                 md->power_ro_lock.attr.mode = mode;
2524                 md->power_ro_lock.attr.name =
2525                                         "ro_lock_until_next_power_on";
2526                 ret = device_create_file(disk_to_dev(md->disk),
2527                                 &md->power_ro_lock);
2528                 if (ret)
2529                         goto power_ro_lock_fail;
2530         }
2531         return ret;
2532
2533 power_ro_lock_fail:
2534         device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2535 force_ro_fail:
2536         del_gendisk(md->disk);
2537
2538         return ret;
2539 }
2540
2541 #ifdef CONFIG_DEBUG_FS
2542
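     /*
      * debugfs support: "status" reports the current card status and
      * "ext_csd" dumps the EXT_CSD register. Both are routed through
      * the block layer as driver operation requests so that they are
      * serialized with normal I/O.
      */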
2543 static int mmc_dbg_card_status_get(void *data, u64 *val)
2544 {
2545         struct mmc_card *card = data;
2546         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2547         struct mmc_queue *mq = &md->queue;
2548         struct request *req;
2549         int ret;
2550
2551         /* Ask the block layer about the card status */
2552         req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
2553         req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
2554         blk_execute_rq(mq->queue, NULL, req, 0);
2555         ret = req_to_mmc_queue_req(req)->drv_op_result;
2556         if (ret >= 0) {
2557                 *val = ret;
2558                 ret = 0;
2559         }
2560
2561         return ret;
2562 }
2563 DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
2564                 NULL, "%08llx\n");
2565
2566 /* Two hex digits per EXT_CSD byte (2 * 512) + 1 for the newline */
2567 #define EXT_CSD_STR_LEN 1025
2568
2569 static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
2570 {
2571         struct mmc_card *card = inode->i_private;
2572         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2573         struct mmc_queue *mq = &md->queue;
2574         struct request *req;
2575         char *buf;
2576         ssize_t n = 0;
2577         u8 *ext_csd;
2578         int err, i;
2579
2580         buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
2581         if (!buf)
2582                 return -ENOMEM;
2583
2584         /* Ask the block layer for the EXT CSD */
2585         req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
2586         req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
2587         req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
2588         blk_execute_rq(mq->queue, NULL, req, 0);
2589         err = req_to_mmc_queue_req(req)->drv_op_result;
2590         if (err) {
2591                 pr_err("%s: failed to get EXT_CSD, err %d\n", __func__, err);
2592                 goto out_free;
2593         }
2594
2595         for (i = 0; i < 512; i++)
2596                 n += sprintf(buf + n, "%02x", ext_csd[i]);
2597         n += sprintf(buf + n, "\n");
2598
2599         if (n != EXT_CSD_STR_LEN) {
2600                 err = -EINVAL;
2601                 goto out_free;
2602         }
2603
2604         filp->private_data = buf;
2605         kfree(ext_csd);
2606         return 0;
2607
2608 out_free:
2609         kfree(buf);
2610         return err;
2611 }
2612
2613 static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
2614                                 size_t cnt, loff_t *ppos)
2615 {
2616         char *buf = filp->private_data;
2617
2618         return simple_read_from_buffer(ubuf, cnt, ppos,
2619                                        buf, EXT_CSD_STR_LEN);
2620 }
2621
2622 static int mmc_ext_csd_release(struct inode *inode, struct file *file)
2623 {
2624         kfree(file->private_data);
2625         return 0;
2626 }
2627
2628 static const struct file_operations mmc_dbg_ext_csd_fops = {
2629         .open           = mmc_ext_csd_open,
2630         .read           = mmc_ext_csd_read,
2631         .release        = mmc_ext_csd_release,
2632         .llseek         = default_llseek,
2633 };
2634
2635 static int mmc_blk_add_debugfs(struct mmc_card *card)
2636 {
2637         struct dentry *root;
2638
2639         if (!card->debugfs_root)
2640                 return 0;
2641
2642         root = card->debugfs_root;
2643
2644         if (mmc_card_mmc(card) || mmc_card_sd(card)) {
2645                 if (!debugfs_create_file("status", S_IRUSR, root, card,
2646                                          &mmc_dbg_card_status_fops))
2647                         return -EIO;
2648         }
2649
2650         if (mmc_card_mmc(card)) {
2651                 if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
2652                                          &mmc_dbg_ext_csd_fops))
2653                         return -EIO;
2654         }
2655
2656         return 0;
2657 }
2658
2660 #else
2661
2662 static int mmc_blk_add_debugfs(struct mmc_card *card)
2663 {
2664         return 0;
2665 }
2666
2667 #endif /* CONFIG_DEBUG_FS */
2668
2669 static int mmc_blk_probe(struct mmc_card *card)
2670 {
2671         struct mmc_blk_data *md, *part_md;
2672         char cap_str[10];
2673
2674         /*
2675          * Check that the card supports the command class(es) we need.
2676          */
2677         if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2678                 return -ENODEV;
2679
2680         mmc_fixup_device(card, mmc_blk_fixups);
2681
2682         md = mmc_blk_alloc(card);
2683         if (IS_ERR(md))
2684                 return PTR_ERR(md);
2685
2686         string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
2687                         cap_str, sizeof(cap_str));
2688         pr_info("%s: %s %s %s %s\n",
2689                 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2690                 cap_str, md->read_only ? "(ro)" : "");
2691
2692         if (mmc_blk_alloc_parts(card, md))
2693                 goto out;
2694
2695         dev_set_drvdata(&card->dev, md);
2696
2697         if (mmc_add_disk(md))
2698                 goto out;
2699
2700         list_for_each_entry(part_md, &md->part, part) {
2701                 if (mmc_add_disk(part_md))
2702                         goto out;
2703         }
2704
2705         /* Add two debugfs entries */
2706         mmc_blk_add_debugfs(card);
2707
2708         pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2709         pm_runtime_use_autosuspend(&card->dev);
2710
2711         /*
2712          * Don't enable runtime PM for SD-combo cards here. Leave that
2713          * decision to be taken during the SDIO init sequence instead.
2714          */
2715         if (card->type != MMC_TYPE_SD_COMBO) {
2716                 pm_runtime_set_active(&card->dev);
2717                 pm_runtime_enable(&card->dev);
2718         }
2719
2720         return 0;
2721
2722  out:
2723         mmc_blk_remove_parts(card, md);
2724         mmc_blk_remove_req(md);
2725         return 0;
2726 }
2727
2728 static void mmc_blk_remove(struct mmc_card *card)
2729 {
2730         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2731
2732         mmc_blk_remove_parts(card, md);
2733         pm_runtime_get_sync(&card->dev);
2734         mmc_claim_host(card->host);
2735         mmc_blk_part_switch(card, md->part_type);
2736         mmc_release_host(card->host);
2737         if (card->type != MMC_TYPE_SD_COMBO)
2738                 pm_runtime_disable(&card->dev);
2739         pm_runtime_put_noidle(&card->dev);
2740         mmc_blk_remove_req(md);
2741         dev_set_drvdata(&card->dev, NULL);
2742 }
2743
2744 static int _mmc_blk_suspend(struct mmc_card *card)
2745 {
2746         struct mmc_blk_data *part_md;
2747         struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2748
2749         if (md) {
2750                 mmc_queue_suspend(&md->queue);
2751                 list_for_each_entry(part_md, &md->part, part) {
2752                         mmc_queue_suspend(&part_md->queue);
2753                 }
2754         }
2755         return 0;
2756 }
2757
2758 static void mmc_blk_shutdown(struct mmc_card *card)
2759 {
2760         _mmc_blk_suspend(card);
2761 }
2762
2763 #ifdef CONFIG_PM_SLEEP
2764 static int mmc_blk_suspend(struct device *dev)
2765 {
2766         struct mmc_card *card = mmc_dev_to_card(dev);
2767
2768         return _mmc_blk_suspend(card);
2769 }
2770
2771 static int mmc_blk_resume(struct device *dev)
2772 {
2773         struct mmc_blk_data *part_md;
2774         struct mmc_blk_data *md = dev_get_drvdata(dev);
2775
2776         if (md) {
2777                 /*
2778                  * Resume involves the card going into idle state,
2779                  * so current partition is always the main one.
2780                  */
2781                 md->part_curr = md->part_type;
2782                 mmc_queue_resume(&md->queue);
2783                 list_for_each_entry(part_md, &md->part, part) {
2784                         mmc_queue_resume(&part_md->queue);
2785                 }
2786         }
2787         return 0;
2788 }
2789 #endif
2790
2791 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2792
2793 static struct mmc_driver mmc_driver = {
2794         .drv            = {
2795                 .name   = "mmcblk",
2796                 .pm     = &mmc_blk_pm_ops,
2797         },
2798         .probe          = mmc_blk_probe,
2799         .remove         = mmc_blk_remove,
2800         .shutdown       = mmc_blk_shutdown,
2801 };
2802
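     /*
      * Register, in order: the RPMB bus type, the RPMB chrdev region,
      * the MMC block major and finally the mmc driver, unwinding in
      * reverse on failure.
      */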
2803 static int __init mmc_blk_init(void)
2804 {
2805         int res;
2806
2807         res  = bus_register(&mmc_rpmb_bus_type);
2808         if (res < 0) {
2809                 pr_err("mmcblk: could not register RPMB bus type\n");
2810                 return res;
2811         }
2812         res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
2813         if (res < 0) {
2814                 pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
2815                 goto out_bus_unreg;
2816         }
2817
2818         if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2819                 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2820
2821         max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
2822
2823         res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2824         if (res)
2825                 goto out_chrdev_unreg;
2826
2827         res = mmc_register_driver(&mmc_driver);
2828         if (res)
2829                 goto out_blkdev_unreg;
2830
2831         return 0;
2832
2833 out_blkdev_unreg:
2834         unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2835 out_chrdev_unreg:
2836         unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
2837 out_bus_unreg:
2838         bus_unregister(&mmc_rpmb_bus_type);
2839         return res;
2840 }
2841
2842 static void __exit mmc_blk_exit(void)
2843 {
2844         mmc_unregister_driver(&mmc_driver);
2845         unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2846         unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
2847 }
2848
2849 module_init(mmc_blk_init);
2850 module_exit(mmc_blk_exit);
2851
2852 MODULE_LICENSE("GPL");
2853 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2854