1 // SPDX-License-Identifier: GPL-2.0-only
3 * sd.c Copyright (C) 1992 Drew Eckhardt
4 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
6 * Linux scsi disk driver
7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale
9 * Modification history:
10 * - Drew Eckhardt <drew@colorado.edu> original
11 * - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
12 * outstanding requests, and other enhancements.
13 * Support loadable low-level scsi drivers.
14 * - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
15 * eight major numbers.
16 * - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
17 * - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
18 * sd_init and cleanups.
19 * - Alex Davis <letmein@erols.com> Fix problem where partition info
20 * was not being read in sd_open. Fix problem where removable media
21 * could be ejected after sd_open.
22 * - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
23 * - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
24 * <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
25 * Support 32k/1M disks.
27 * Logging policy (needs CONFIG_SCSI_LOGGING defined):
28 * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
29 * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
30 * - entering sd_ioctl: SCSI_LOG_IOCTL level 1
31 * - entering other commands: SCSI_LOG_HLQUEUE level 3
32 * Note: when the logging level is set by the user, it must be greater
33 * than the level indicated above to trigger output.
36 #include <linux/module.h>
38 #include <linux/kernel.h>
40 #include <linux/bio.h>
41 #include <linux/genhd.h>
42 #include <linux/hdreg.h>
43 #include <linux/errno.h>
44 #include <linux/idr.h>
45 #include <linux/interrupt.h>
46 #include <linux/init.h>
47 #include <linux/blkdev.h>
48 #include <linux/blkpg.h>
49 #include <linux/blk-pm.h>
50 #include <linux/delay.h>
51 #include <linux/mutex.h>
52 #include <linux/string_helpers.h>
53 #include <linux/async.h>
54 #include <linux/slab.h>
55 #include <linux/sed-opal.h>
56 #include <linux/pm_runtime.h>
58 #include <linux/t10-pi.h>
59 #include <linux/uaccess.h>
60 #include <asm/unaligned.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_dbg.h>
65 #include <scsi/scsi_device.h>
66 #include <scsi/scsi_driver.h>
67 #include <scsi/scsi_eh.h>
68 #include <scsi/scsi_host.h>
69 #include <scsi/scsi_ioctl.h>
70 #include <scsi/scsicam.h>
73 #include "scsi_priv.h"
74 #include "scsi_logging.h"
76 MODULE_AUTHOR("Eric Youngdale");
77 MODULE_DESCRIPTION("SCSI disk (sd) driver");
78 MODULE_LICENSE("GPL");
80 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
81 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
82 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
83 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
84 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
85 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
86 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
87 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
88 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
89 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
90 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
91 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
92 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
93 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
94 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
95 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
96 MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
97 MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
98 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
99 MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
101 #if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
107 static void sd_config_discard(struct scsi_disk *, unsigned int);
108 static void sd_config_write_same(struct scsi_disk *);
109 static int sd_revalidate_disk(struct gendisk *);
110 static void sd_unlock_native_capacity(struct gendisk *disk);
111 static int sd_probe(struct device *);
112 static int sd_remove(struct device *);
113 static void sd_shutdown(struct device *);
114 static int sd_suspend_system(struct device *);
115 static int sd_suspend_runtime(struct device *);
116 static int sd_resume(struct device *);
117 static void sd_rescan(struct device *);
118 static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
119 static void sd_uninit_command(struct scsi_cmnd *SCpnt);
120 static int sd_done(struct scsi_cmnd *);
121 static void sd_eh_reset(struct scsi_cmnd *);
122 static int sd_eh_action(struct scsi_cmnd *, int);
123 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
124 static void scsi_disk_release(struct device *cdev);
126 static DEFINE_IDA(sd_index_ida);
128 /* This mutex is used to mediate the 0->1 reference get in the
129 * face of object destruction (i.e. we can't allow a get on an
130 * object after the last put) */
131 static DEFINE_MUTEX(sd_ref_mutex);
133 static struct kmem_cache *sd_cdb_cache;
134 static mempool_t *sd_cdb_pool;
135 static mempool_t *sd_page_pool;
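/*
 * The index into sd_cache_types[] encodes the caching mode page bits:
 * bit 0 is RCD (read cache disable) and bit 1 is WCE (write cache
 * enable), matching cache_type_show()/cache_type_store() below, e.g.
 *
 *   echo "write back" > /sys/class/scsi_disk/<h:c:t:l>/cache_type
 *
 * selects WCE=1/RCD=0.
 */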
137 static const char *sd_cache_types[] = {
138 "write through", "none", "write back",
139 "write back, no read (daft)"
142 static void sd_set_flush_flag(struct scsi_disk *sdkp)
144 bool wc = false, fua = false;
152 blk_queue_write_cache(sdkp->disk->queue, wc, fua);
156 cache_type_store(struct device *dev, struct device_attribute *attr,
157 const char *buf, size_t count)
159 int ct, rcd, wce, sp;
160 struct scsi_disk *sdkp = to_scsi_disk(dev);
161 struct scsi_device *sdp = sdkp->device;
164 struct scsi_mode_data data;
165 struct scsi_sense_hdr sshdr;
166 static const char temp[] = "temporary ";
169 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
170 /* no cache control on RBC devices; theoretically they
171 * can do it, but there are probably so many exceptions
172 * it's not worth the risk */
175 if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
176 buf += sizeof(temp) - 1;
177 sdkp->cache_override = 1;
179 sdkp->cache_override = 0;
182 ct = sysfs_match_string(sd_cache_types, buf);
186 rcd = ct & 0x01 ? 1 : 0;
187 wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
189 if (sdkp->cache_override) {
192 sd_set_flush_flag(sdkp);
196 if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
197 sdkp->max_retries, &data, NULL))
199 len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
200 data.block_descriptor_length);
201 buffer_data = buffer + data.header_length +
202 data.block_descriptor_length;
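/*
 * Caching mode page (0x08): byte 2 bit 2 is WCE and bit 0 is RCD.
 * Byte 0 bit 7 is the PS (parameters saveable) bit reported by
 * MODE SENSE; it must be cleared before MODE SELECT and is only
 * used to decide whether to set the SP bit in the CDB.
 */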
203 buffer_data[2] &= ~0x05;
204 buffer_data[2] |= wce << 2 | rcd;
205 sp = buffer_data[0] & 0x80 ? 1 : 0;
206 buffer_data[0] &= ~0x80;
209 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
210 * received mode parameter buffer before doing MODE SELECT.
212 data.device_specific = 0;
214 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
215 sdkp->max_retries, &data, &sshdr)) {
216 if (scsi_sense_valid(&sshdr))
217 sd_print_sense_hdr(sdkp, &sshdr);
220 sd_revalidate_disk(sdkp->disk);
225 manage_start_stop_show(struct device *dev, struct device_attribute *attr,
228 struct scsi_disk *sdkp = to_scsi_disk(dev);
229 struct scsi_device *sdp = sdkp->device;
231 return sprintf(buf, "%u\n", sdp->manage_start_stop);
235 manage_start_stop_store(struct device *dev, struct device_attribute *attr,
236 const char *buf, size_t count)
238 struct scsi_disk *sdkp = to_scsi_disk(dev);
239 struct scsi_device *sdp = sdkp->device;
242 if (!capable(CAP_SYS_ADMIN))
245 if (kstrtobool(buf, &v))
248 sdp->manage_start_stop = v;
252 static DEVICE_ATTR_RW(manage_start_stop);
255 allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
257 struct scsi_disk *sdkp = to_scsi_disk(dev);
259 return sprintf(buf, "%u\n", sdkp->device->allow_restart);
263 allow_restart_store(struct device *dev, struct device_attribute *attr,
264 const char *buf, size_t count)
267 struct scsi_disk *sdkp = to_scsi_disk(dev);
268 struct scsi_device *sdp = sdkp->device;
270 if (!capable(CAP_SYS_ADMIN))
273 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
276 if (kstrtobool(buf, &v))
279 sdp->allow_restart = v;
283 static DEVICE_ATTR_RW(allow_restart);
286 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
288 struct scsi_disk *sdkp = to_scsi_disk(dev);
289 int ct = sdkp->RCD + 2*sdkp->WCE;
291 return sprintf(buf, "%s\n", sd_cache_types[ct]);
293 static DEVICE_ATTR_RW(cache_type);
296 FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
298 struct scsi_disk *sdkp = to_scsi_disk(dev);
300 return sprintf(buf, "%u\n", sdkp->DPOFUA);
302 static DEVICE_ATTR_RO(FUA);
305 protection_type_show(struct device *dev, struct device_attribute *attr,
308 struct scsi_disk *sdkp = to_scsi_disk(dev);
310 return sprintf(buf, "%u\n", sdkp->protection_type);
314 protection_type_store(struct device *dev, struct device_attribute *attr,
315 const char *buf, size_t count)
317 struct scsi_disk *sdkp = to_scsi_disk(dev);
321 if (!capable(CAP_SYS_ADMIN))
324 err = kstrtouint(buf, 10, &val);
329 if (val <= T10_PI_TYPE3_PROTECTION)
330 sdkp->protection_type = val;
334 static DEVICE_ATTR_RW(protection_type);
337 protection_mode_show(struct device *dev, struct device_attribute *attr,
340 struct scsi_disk *sdkp = to_scsi_disk(dev);
341 struct scsi_device *sdp = sdkp->device;
342 unsigned int dif, dix;
344 dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
345 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
347 if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
353 return sprintf(buf, "none\n");
355 return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
357 static DEVICE_ATTR_RO(protection_mode);
360 app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
362 struct scsi_disk *sdkp = to_scsi_disk(dev);
364 return sprintf(buf, "%u\n", sdkp->ATO);
366 static DEVICE_ATTR_RO(app_tag_own);
369 thin_provisioning_show(struct device *dev, struct device_attribute *attr,
372 struct scsi_disk *sdkp = to_scsi_disk(dev);
374 return sprintf(buf, "%u\n", sdkp->lbpme);
376 static DEVICE_ATTR_RO(thin_provisioning);
378 /* sysfs_match_string() requires dense arrays */
379 static const char *lbp_mode[] = {
380 [SD_LBP_FULL] = "full",
381 [SD_LBP_UNMAP] = "unmap",
382 [SD_LBP_WS16] = "writesame_16",
383 [SD_LBP_WS10] = "writesame_10",
384 [SD_LBP_ZERO] = "writesame_zero",
385 [SD_LBP_DISABLE] = "disabled",
389 provisioning_mode_show(struct device *dev, struct device_attribute *attr,
392 struct scsi_disk *sdkp = to_scsi_disk(dev);
394 return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
398 provisioning_mode_store(struct device *dev, struct device_attribute *attr,
399 const char *buf, size_t count)
401 struct scsi_disk *sdkp = to_scsi_disk(dev);
402 struct scsi_device *sdp = sdkp->device;
405 if (!capable(CAP_SYS_ADMIN))
408 if (sd_is_zoned(sdkp)) {
409 sd_config_discard(sdkp, SD_LBP_DISABLE);
413 if (sdp->type != TYPE_DISK)
416 mode = sysfs_match_string(lbp_mode, buf);
420 sd_config_discard(sdkp, mode);
424 static DEVICE_ATTR_RW(provisioning_mode);
426 /* sysfs_match_string() requires dense arrays */
427 static const char *zeroing_mode[] = {
428 [SD_ZERO_WRITE] = "write",
429 [SD_ZERO_WS] = "writesame",
430 [SD_ZERO_WS16_UNMAP] = "writesame_16_unmap",
431 [SD_ZERO_WS10_UNMAP] = "writesame_10_unmap",
435 zeroing_mode_show(struct device *dev, struct device_attribute *attr,
438 struct scsi_disk *sdkp = to_scsi_disk(dev);
440 return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
444 zeroing_mode_store(struct device *dev, struct device_attribute *attr,
445 const char *buf, size_t count)
447 struct scsi_disk *sdkp = to_scsi_disk(dev);
450 if (!capable(CAP_SYS_ADMIN))
453 mode = sysfs_match_string(zeroing_mode, buf);
457 sdkp->zeroing_mode = mode;
461 static DEVICE_ATTR_RW(zeroing_mode);
464 max_medium_access_timeouts_show(struct device *dev,
465 struct device_attribute *attr, char *buf)
467 struct scsi_disk *sdkp = to_scsi_disk(dev);
469 return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
473 max_medium_access_timeouts_store(struct device *dev,
474 struct device_attribute *attr, const char *buf,
477 struct scsi_disk *sdkp = to_scsi_disk(dev);
480 if (!capable(CAP_SYS_ADMIN))
483 err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
485 return err ? err : count;
487 static DEVICE_ATTR_RW(max_medium_access_timeouts);
490 max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
493 struct scsi_disk *sdkp = to_scsi_disk(dev);
495 return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
499 max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
500 const char *buf, size_t count)
502 struct scsi_disk *sdkp = to_scsi_disk(dev);
503 struct scsi_device *sdp = sdkp->device;
507 if (!capable(CAP_SYS_ADMIN))
510 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
513 err = kstrtoul(buf, 10, &max);
519 sdp->no_write_same = 1;
520 else if (max <= SD_MAX_WS16_BLOCKS) {
521 sdp->no_write_same = 0;
522 sdkp->max_ws_blocks = max;
525 sd_config_write_same(sdkp);
529 static DEVICE_ATTR_RW(max_write_same_blocks);
532 zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
534 struct scsi_disk *sdkp = to_scsi_disk(dev);
536 if (sdkp->device->type == TYPE_ZBC)
537 return sprintf(buf, "host-managed\n");
538 if (sdkp->zoned == 1)
539 return sprintf(buf, "host-aware\n");
540 if (sdkp->zoned == 2)
541 return sprintf(buf, "drive-managed\n");
542 return sprintf(buf, "none\n");
544 static DEVICE_ATTR_RO(zoned_cap);
547 max_retries_store(struct device *dev, struct device_attribute *attr,
548 const char *buf, size_t count)
550 struct scsi_disk *sdkp = to_scsi_disk(dev);
551 struct scsi_device *sdev = sdkp->device;
554 err = kstrtoint(buf, 10, &retries);
558 if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
559 sdkp->max_retries = retries;
563 sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
569 max_retries_show(struct device *dev, struct device_attribute *attr,
572 struct scsi_disk *sdkp = to_scsi_disk(dev);
574 return sprintf(buf, "%d\n", sdkp->max_retries);
577 static DEVICE_ATTR_RW(max_retries);
579 static struct attribute *sd_disk_attrs[] = {
580 &dev_attr_cache_type.attr,
582 &dev_attr_allow_restart.attr,
583 &dev_attr_manage_start_stop.attr,
584 &dev_attr_protection_type.attr,
585 &dev_attr_protection_mode.attr,
586 &dev_attr_app_tag_own.attr,
587 &dev_attr_thin_provisioning.attr,
588 &dev_attr_provisioning_mode.attr,
589 &dev_attr_zeroing_mode.attr,
590 &dev_attr_max_write_same_blocks.attr,
591 &dev_attr_max_medium_access_timeouts.attr,
592 &dev_attr_zoned_cap.attr,
593 &dev_attr_max_retries.attr,
596 ATTRIBUTE_GROUPS(sd_disk);
598 static struct class sd_disk_class = {
600 .owner = THIS_MODULE,
601 .dev_release = scsi_disk_release,
602 .dev_groups = sd_disk_groups,
605 static const struct dev_pm_ops sd_pm_ops = {
606 .suspend = sd_suspend_system,
608 .poweroff = sd_suspend_system,
609 .restore = sd_resume,
610 .runtime_suspend = sd_suspend_runtime,
611 .runtime_resume = sd_resume,
614 static struct scsi_driver sd_template = {
617 .owner = THIS_MODULE,
619 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
621 .shutdown = sd_shutdown,
625 .init_command = sd_init_command,
626 .uninit_command = sd_uninit_command,
628 .eh_action = sd_eh_action,
629 .eh_reset = sd_eh_reset,
633 * Dummy kobj_map->probe function.
634 * The default ->probe function will call modprobe, which is
635 * pointless as this module is already loaded.
637 static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
643 * Device number to disk mapping:
645 * major disc2 disc p1
646 * |............|.............|....|....| <- dev_t
649 * Inside a major, we have 16k disks, but they are mapped non-
650 * contiguously. The first 16 disks are for major0, the next 16
651 * for major1, and so on; disk 256 is for major0 again, disk 272 for major1, ...
653 * As we stay compatible with our numbering scheme, we can reuse
654 * the well-known SCSI majors 8, 65--71, 136--143. */
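/*
 * For example, major index 0 maps to SCSI_DISK0_MAJOR (8), indices
 * 1-7 map to SCSI_DISK1_MAJOR..SCSI_DISK7_MAJOR (65-71) and indices
 * 8-15 map to SCSI_DISK8_MAJOR..SCSI_DISK15_MAJOR (136-143).
 */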
656 static int sd_major(int major_idx)
660 return SCSI_DISK0_MAJOR;
662 return SCSI_DISK1_MAJOR + major_idx - 1;
664 return SCSI_DISK8_MAJOR + major_idx - 8;
667 return 0; /* shut up gcc */
671 static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
673 struct scsi_disk *sdkp = NULL;
675 mutex_lock(&sd_ref_mutex);
677 if (disk->private_data) {
678 sdkp = scsi_disk(disk);
679 if (scsi_device_get(sdkp->device) == 0)
680 get_device(&sdkp->dev);
684 mutex_unlock(&sd_ref_mutex);
688 static void scsi_disk_put(struct scsi_disk *sdkp)
690 struct scsi_device *sdev = sdkp->device;
692 mutex_lock(&sd_ref_mutex);
693 put_device(&sdkp->dev);
694 scsi_device_put(sdev);
695 mutex_unlock(&sd_ref_mutex);
698 #ifdef CONFIG_BLK_SED_OPAL
699 static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
700 size_t len, bool send)
702 struct scsi_disk *sdkp = data;
703 struct scsi_device *sdev = sdkp->device;
707 cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
709 put_unaligned_be16(spsp, &cdb[2]);
710 put_unaligned_be32(len, &cdb[6]);
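/*
 * SECURITY PROTOCOL IN/OUT CDB layout: byte 1 carries the security
 * protocol (secp), bytes 2-3 the protocol specific field (spsp) and
 * bytes 6-9 the allocation/transfer length.
 */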
712 ret = scsi_execute_req(sdev, cdb,
713 send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
714 buffer, len, NULL, SD_TIMEOUT, sdkp->max_retries, NULL);
715 return ret <= 0 ? ret : -EIO;
717 #endif /* CONFIG_BLK_SED_OPAL */
720 * Look up the DIX operation based on whether the command is read or
721 * write and whether dix and dif are enabled.
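 * Here "dix" means the request carries host-side integrity metadata and
 * "dif" means the target is formatted with protection information; the
 * STRIP, INSERT and PASS operations describe what happens to that
 * metadata between host memory and the wire.
 */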
723 static unsigned int sd_prot_op(bool write, bool dix, bool dif)
725 /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
726 static const unsigned int ops[] = { /* wrt dix dif */
727 SCSI_PROT_NORMAL, /* 0 0 0 */
728 SCSI_PROT_READ_STRIP, /* 0 0 1 */
729 SCSI_PROT_READ_INSERT, /* 0 1 0 */
730 SCSI_PROT_READ_PASS, /* 0 1 1 */
731 SCSI_PROT_NORMAL, /* 1 0 0 */
732 SCSI_PROT_WRITE_INSERT, /* 1 0 1 */
733 SCSI_PROT_WRITE_STRIP, /* 1 1 0 */
734 SCSI_PROT_WRITE_PASS, /* 1 1 1 */
737 return ops[write << 2 | dix << 1 | dif];
741 * Returns a mask of the protection flags that are valid for a given DIX operation. */
744 static unsigned int sd_prot_flag_mask(unsigned int prot_op)
746 static const unsigned int flag_mask[] = {
747 [SCSI_PROT_NORMAL] = 0,
749 [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI |
750 SCSI_PROT_GUARD_CHECK |
751 SCSI_PROT_REF_CHECK |
752 SCSI_PROT_REF_INCREMENT,
754 [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT |
755 SCSI_PROT_IP_CHECKSUM,
757 [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI |
758 SCSI_PROT_GUARD_CHECK |
759 SCSI_PROT_REF_CHECK |
760 SCSI_PROT_REF_INCREMENT |
761 SCSI_PROT_IP_CHECKSUM,
763 [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI |
764 SCSI_PROT_REF_INCREMENT,
766 [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK |
767 SCSI_PROT_REF_CHECK |
768 SCSI_PROT_REF_INCREMENT |
769 SCSI_PROT_IP_CHECKSUM,
771 [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI |
772 SCSI_PROT_GUARD_CHECK |
773 SCSI_PROT_REF_CHECK |
774 SCSI_PROT_REF_INCREMENT |
775 SCSI_PROT_IP_CHECKSUM,
778 return flag_mask[prot_op];
781 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
782 unsigned int dix, unsigned int dif)
784 struct bio *bio = scmd->request->bio;
785 unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
786 unsigned int protect = 0;
788 if (dix) { /* DIX Type 0, 1, 2, 3 */
789 if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
790 scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
792 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
793 scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
796 if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
797 scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
799 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
800 scmd->prot_flags |= SCSI_PROT_REF_CHECK;
803 if (dif) { /* DIX/DIF Type 1, 2, 3 */
804 scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;
806 if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
807 protect = 3 << 5; /* Disable target PI checking */
809 protect = 1 << 5; /* Enable target PI checking */
812 scsi_set_prot_op(scmd, prot_op);
813 scsi_set_prot_type(scmd, dif);
814 scmd->prot_flags &= sd_prot_flag_mask(prot_op);
819 static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
821 struct request_queue *q = sdkp->disk->queue;
822 unsigned int logical_block_size = sdkp->device->sector_size;
823 unsigned int max_blocks = 0;
825 q->limits.discard_alignment =
826 sdkp->unmap_alignment * logical_block_size;
827 q->limits.discard_granularity =
828 max(sdkp->physical_block_size,
829 sdkp->unmap_granularity * logical_block_size);
830 sdkp->provisioning_mode = mode;
836 blk_queue_max_discard_sectors(q, 0);
837 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
841 max_blocks = min_not_zero(sdkp->max_unmap_blocks,
842 (u32)SD_MAX_WS16_BLOCKS);
846 if (sdkp->device->unmap_limit_for_ws)
847 max_blocks = sdkp->max_unmap_blocks;
849 max_blocks = sdkp->max_ws_blocks;
851 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
855 if (sdkp->device->unmap_limit_for_ws)
856 max_blocks = sdkp->max_unmap_blocks;
858 max_blocks = sdkp->max_ws_blocks;
860 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
864 max_blocks = min_not_zero(sdkp->max_ws_blocks,
865 (u32)SD_MAX_WS10_BLOCKS);
869 blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
870 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
873 static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
875 struct scsi_device *sdp = cmd->device;
876 struct request *rq = cmd->request;
877 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
878 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
879 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
880 unsigned int data_len = 24;
883 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
884 if (!rq->special_vec.bv_page)
885 return BLK_STS_RESOURCE;
886 clear_highpage(rq->special_vec.bv_page);
887 rq->special_vec.bv_offset = 0;
888 rq->special_vec.bv_len = data_len;
889 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
892 cmd->cmnd[0] = UNMAP;
895 buf = page_address(rq->special_vec.bv_page);
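/*
 * UNMAP parameter list: bytes 0-1 hold the data length (header plus
 * one 16-byte block descriptor), bytes 2-3 the block descriptor data
 * length, and the single descriptor at byte 8 holds an 8-byte LBA
 * followed by a 4-byte block count.
 */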
896 put_unaligned_be16(6 + 16, &buf[0]);
897 put_unaligned_be16(16, &buf[2]);
898 put_unaligned_be64(lba, &buf[8]);
899 put_unaligned_be32(nr_blocks, &buf[16]);
901 cmd->allowed = sdkp->max_retries;
902 cmd->transfersize = data_len;
903 rq->timeout = SD_TIMEOUT;
905 return scsi_init_io(cmd);
908 static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
911 struct scsi_device *sdp = cmd->device;
912 struct request *rq = cmd->request;
913 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
914 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
915 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
916 u32 data_len = sdp->sector_size;
918 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
919 if (!rq->special_vec.bv_page)
920 return BLK_STS_RESOURCE;
921 clear_highpage(rq->special_vec.bv_page);
922 rq->special_vec.bv_offset = 0;
923 rq->special_vec.bv_len = data_len;
924 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
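/*
 * WRITE SAME(16): LBA in CDB bytes 2-9, number of blocks in bytes
 * 10-13; the UNMAP bit (byte 1, bit 3) asks the device to deallocate
 * the range instead of writing the payload block.
 */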
927 cmd->cmnd[0] = WRITE_SAME_16;
929 cmd->cmnd[1] = 0x8; /* UNMAP */
930 put_unaligned_be64(lba, &cmd->cmnd[2]);
931 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
933 cmd->allowed = sdkp->max_retries;
934 cmd->transfersize = data_len;
935 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
937 return scsi_init_io(cmd);
940 static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
943 struct scsi_device *sdp = cmd->device;
944 struct request *rq = cmd->request;
945 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
946 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
947 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
948 u32 data_len = sdp->sector_size;
950 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
951 if (!rq->special_vec.bv_page)
952 return BLK_STS_RESOURCE;
953 clear_highpage(rq->special_vec.bv_page);
954 rq->special_vec.bv_offset = 0;
955 rq->special_vec.bv_len = data_len;
956 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
959 cmd->cmnd[0] = WRITE_SAME;
961 cmd->cmnd[1] = 0x8; /* UNMAP */
962 put_unaligned_be32(lba, &cmd->cmnd[2]);
963 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
965 cmd->allowed = sdkp->max_retries;
966 cmd->transfersize = data_len;
967 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
969 return scsi_init_io(cmd);
972 static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
974 struct request *rq = cmd->request;
975 struct scsi_device *sdp = cmd->device;
976 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
977 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
978 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
980 if (!(rq->cmd_flags & REQ_NOUNMAP)) {
981 switch (sdkp->zeroing_mode) {
982 case SD_ZERO_WS16_UNMAP:
983 return sd_setup_write_same16_cmnd(cmd, true);
984 case SD_ZERO_WS10_UNMAP:
985 return sd_setup_write_same10_cmnd(cmd, true);
989 if (sdp->no_write_same)
990 return BLK_STS_TARGET;
992 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
993 return sd_setup_write_same16_cmnd(cmd, false);
995 return sd_setup_write_same10_cmnd(cmd, false);
998 static void sd_config_write_same(struct scsi_disk *sdkp)
1000 struct request_queue *q = sdkp->disk->queue;
1001 unsigned int logical_block_size = sdkp->device->sector_size;
1003 if (sdkp->device->no_write_same) {
1004 sdkp->max_ws_blocks = 0;
1008 /* Some devices cannot handle block counts above 0xffff despite
1009 * supporting WRITE SAME(16). Consequently we default to 64k
1010 * blocks per I/O unless the device explicitly advertises a larger limit. */
1013 if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
1014 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
1015 (u32)SD_MAX_WS16_BLOCKS);
1016 else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
1017 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
1018 (u32)SD_MAX_WS10_BLOCKS);
1020 sdkp->device->no_write_same = 1;
1021 sdkp->max_ws_blocks = 0;
1024 if (sdkp->lbprz && sdkp->lbpws)
1025 sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
1026 else if (sdkp->lbprz && sdkp->lbpws10)
1027 sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
1028 else if (sdkp->max_ws_blocks)
1029 sdkp->zeroing_mode = SD_ZERO_WS;
1031 sdkp->zeroing_mode = SD_ZERO_WRITE;
1033 if (sdkp->max_ws_blocks &&
1034 sdkp->physical_block_size > logical_block_size) {
1036 * Reporting a maximum number of blocks that is not aligned
1037 * on the device physical size would cause a large write same
1038 * request to be split into physically unaligned chunks by
1039 * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
1040 * even if the caller of these functions took care to align the
1041 * large request. So make sure the maximum reported is aligned
1042 * to the device physical block size. This is only an optional
1043 * optimization for regular disks, but this is mandatory to
1044 * avoid failure of large write same requests directed at
1045 * sequential write required zones of host-managed ZBC disks.
1047 sdkp->max_ws_blocks =
1048 round_down(sdkp->max_ws_blocks,
1049 bytes_to_logical(sdkp->device,
1050 sdkp->physical_block_size));
1054 blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
1055 (logical_block_size >> 9));
1056 blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
1057 (logical_block_size >> 9));
1061 * sd_setup_write_same_cmnd - write the same data to multiple blocks
1062 * @cmd: command to prepare
1064 * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
1065 * the preference indicated by the target device.
1067 static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
1069 struct request *rq = cmd->request;
1070 struct scsi_device *sdp = cmd->device;
1071 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
1072 struct bio *bio = rq->bio;
1073 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1074 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1077 if (sdkp->device->no_write_same)
1078 return BLK_STS_TARGET;
1080 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
1082 rq->timeout = SD_WRITE_SAME_TIMEOUT;
1084 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
1086 cmd->cmnd[0] = WRITE_SAME_16;
1087 put_unaligned_be64(lba, &cmd->cmnd[2]);
1088 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
1091 cmd->cmnd[0] = WRITE_SAME;
1092 put_unaligned_be32(lba, &cmd->cmnd[2]);
1093 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
1096 cmd->transfersize = sdp->sector_size;
1097 cmd->allowed = sdkp->max_retries;
1100 * For WRITE SAME the data transferred via the DATA OUT buffer is
1101 * different from the amount of data actually written to the target.
1103 * We set up __data_len to the amount of data transferred via the
1104 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
1105 * to transfer a single sector of data first, but then reset it to
1106 * the amount of data to be written right after so that the I/O path
1107 * knows how much to actually write.
1109 rq->__data_len = sdp->sector_size;
1110 ret = scsi_init_io(cmd);
1111 rq->__data_len = blk_rq_bytes(rq);
1116 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
1118 struct request *rq = cmd->request;
1119 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
1121 /* flush requests don't perform I/O, zero the S/G table */
1122 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1124 cmd->cmnd[0] = SYNCHRONIZE_CACHE;
1126 cmd->transfersize = 0;
1127 cmd->allowed = sdkp->max_retries;
1129 rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
1133 static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
1134 sector_t lba, unsigned int nr_blocks,
1135 unsigned char flags)
1137 cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
1138 if (unlikely(cmd->cmnd == NULL))
1139 return BLK_STS_RESOURCE;
1141 cmd->cmd_len = SD_EXT_CDB_SIZE;
1142 memset(cmd->cmnd, 0, cmd->cmd_len);
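/*
 * READ(32)/WRITE(32) use the variable-length CDB: byte 7 holds the
 * additional CDB length (0x18), byte 9 the service action, byte 10
 * the protection/DPO/FUA flags, bytes 12-19 the LBA, bytes 20-23 the
 * expected initial reference tag and bytes 28-31 the transfer length.
 */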
1144 cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
1145 cmd->cmnd[7] = 0x18; /* Additional CDB len */
1146 cmd->cmnd[9] = write ? WRITE_32 : READ_32;
1147 cmd->cmnd[10] = flags;
1148 put_unaligned_be64(lba, &cmd->cmnd[12]);
1149 put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
1150 put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);
1155 static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
1156 sector_t lba, unsigned int nr_blocks,
1157 unsigned char flags)
1160 cmd->cmnd[0] = write ? WRITE_16 : READ_16;
1161 cmd->cmnd[1] = flags;
1164 put_unaligned_be64(lba, &cmd->cmnd[2]);
1165 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
1170 static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
1171 sector_t lba, unsigned int nr_blocks,
1172 unsigned char flags)
1175 cmd->cmnd[0] = write ? WRITE_10 : READ_10;
1176 cmd->cmnd[1] = flags;
1179 put_unaligned_be32(lba, &cmd->cmnd[2]);
1180 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
1185 static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
1186 sector_t lba, unsigned int nr_blocks,
1187 unsigned char flags)
1189 /* Avoid 0 blocks getting translated into 256 blocks. */
1190 if (WARN_ON_ONCE(nr_blocks == 0))
1191 return BLK_STS_IOERR;
1193 if (unlikely(flags & 0x8)) {
1195 * This happens only if this drive failed a 10-byte rw
1196 * command with ILLEGAL_REQUEST during operation and
1197 * thus turned off use_10_for_rw.
1199 scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
1200 return BLK_STS_IOERR;
1204 cmd->cmnd[0] = write ? WRITE_6 : READ_6;
1205 cmd->cmnd[1] = (lba >> 16) & 0x1f;
1206 cmd->cmnd[2] = (lba >> 8) & 0xff;
1207 cmd->cmnd[3] = lba & 0xff;
1208 cmd->cmnd[4] = nr_blocks;
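/*
 * READ(6)/WRITE(6) pack a 21-bit LBA into bytes 1-3 and an 8-bit
 * transfer length into byte 4, where 0 would mean 256 blocks
 * (hence the zero-length check at the top of this function).
 */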
1214 static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
1216 struct request *rq = cmd->request;
1217 struct scsi_device *sdp = cmd->device;
1218 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
1219 sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1221 unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1222 unsigned int mask = logical_to_sectors(sdp, 1) - 1;
1223 bool write = rq_data_dir(rq) == WRITE;
1224 unsigned char protect, fua;
1229 ret = scsi_init_io(cmd);
1230 if (ret != BLK_STS_OK)
1233 if (!scsi_device_online(sdp) || sdp->changed) {
1234 scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
1235 return BLK_STS_IOERR;
1238 if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
1239 scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
1240 return BLK_STS_IOERR;
1243 if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
1244 scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
1245 return BLK_STS_IOERR;
1249 * Some SD card readers can't handle accesses which touch the
1250 * last one or two logical blocks. Split accesses as needed.
1252 threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
1254 if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
1255 if (lba < threshold) {
1256 /* Access up to the threshold but not beyond */
1257 nr_blocks = threshold - lba;
1259 /* Access only a single logical block */
1264 if (req_op(rq) == REQ_OP_ZONE_APPEND) {
1265 ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
1270 fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
1271 dix = scsi_prot_sg_count(cmd);
1272 dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
1275 protect = sd_setup_protect_cmnd(cmd, dix, dif);
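/*
 * "protect" becomes the RDPROTECT/WRPROTECT value (bits 5-7 of the
 * CDB flags byte) and "fua" bit 3 of the same byte; both end up in
 * the flags argument passed to the sd_setup_rw*_cmnd() helpers below.
 */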
1279 if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
1280 ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
1282 } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
1283 ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
1285 } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
1286 sdp->use_10_for_rw || protect) {
1287 ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
1290 ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
1294 if (unlikely(ret != BLK_STS_OK))
1298 * We shouldn't disconnect in the middle of a sector, so with a dumb
1299 * host adapter, it's safe to assume that we can at least transfer
1300 * this many bytes between each connect / disconnect.
1302 cmd->transfersize = sdp->sector_size;
1303 cmd->underflow = nr_blocks << 9;
1304 cmd->allowed = sdkp->max_retries;
1305 cmd->sdb.length = nr_blocks * sdp->sector_size;
1308 scmd_printk(KERN_INFO, cmd,
1309 "%s: block=%llu, count=%d\n", __func__,
1310 (unsigned long long)blk_rq_pos(rq),
1311 blk_rq_sectors(rq)));
1313 scmd_printk(KERN_INFO, cmd,
1314 "%s %d/%u 512 byte blocks.\n",
1315 write ? "writing" : "reading", nr_blocks,
1316 blk_rq_sectors(rq)));
1319 * This indicates that the command is ready from our end to be
1325 static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
1327 struct request *rq = cmd->request;
1329 switch (req_op(rq)) {
1330 case REQ_OP_DISCARD:
1331 switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
1333 return sd_setup_unmap_cmnd(cmd);
1335 return sd_setup_write_same16_cmnd(cmd, true);
1337 return sd_setup_write_same10_cmnd(cmd, true);
1339 return sd_setup_write_same10_cmnd(cmd, false);
1341 return BLK_STS_TARGET;
1343 case REQ_OP_WRITE_ZEROES:
1344 return sd_setup_write_zeroes_cmnd(cmd);
1345 case REQ_OP_WRITE_SAME:
1346 return sd_setup_write_same_cmnd(cmd);
1348 return sd_setup_flush_cmnd(cmd);
1351 case REQ_OP_ZONE_APPEND:
1352 return sd_setup_read_write_cmnd(cmd);
1353 case REQ_OP_ZONE_RESET:
1354 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
1356 case REQ_OP_ZONE_RESET_ALL:
1357 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
1359 case REQ_OP_ZONE_OPEN:
1360 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
1361 case REQ_OP_ZONE_CLOSE:
1362 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
1363 case REQ_OP_ZONE_FINISH:
1364 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
1367 return BLK_STS_NOTSUPP;
1371 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1373 struct request *rq = SCpnt->request;
1376 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1377 mempool_free(rq->special_vec.bv_page, sd_page_pool);
1379 if (SCpnt->cmnd != scsi_req(rq)->cmd) {
1383 mempool_free(cmnd, sd_cdb_pool);
1388 * sd_open - open a scsi disk device
1389 * @bdev: Block device of the scsi disk to open
1390 * @mode: FMODE_* mask
1392 * Returns 0 if successful. Returns a negated errno value in case of error.
1395 * Note: This can be called from a user context (e.g. fsck(1))
1396 * or from within the kernel (e.g. as a result of a mount(1)).
1397 * In the latter case @bdev and @mode carry an abridged amount
1398 * of information as noted above.
1400 * Locking: called with bdev->bd_mutex held.
1402 static int sd_open(struct block_device *bdev, fmode_t mode)
1404 struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
1405 struct scsi_device *sdev;
1411 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
1413 sdev = sdkp->device;
1416 * If the device is in error recovery, wait until it is done.
1417 * If the device is offline, then disallow any access to it.
1420 if (!scsi_block_when_processing_errors(sdev))
1423 if (sdev->removable || sdkp->write_prot) {
1424 if (bdev_check_media_change(bdev))
1425 sd_revalidate_disk(bdev->bd_disk);
1429 * If the drive is empty, just let the open fail.
1431 retval = -ENOMEDIUM;
1432 if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
1436 * If the device has the write protect tab set, have the open fail
1437 * if the user expects to be able to write to the thing.
1440 if (sdkp->write_prot && (mode & FMODE_WRITE))
1444 * It is possible that the disk changing stuff resulted in
1445 * the device being taken offline. If this is the case,
1446 * report this to the user, and don't pretend that the
1447 * open actually succeeded.
1450 if (!scsi_device_online(sdev))
1453 if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
1454 if (scsi_block_when_processing_errors(sdev))
1455 scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
1461 scsi_disk_put(sdkp);
1466 * sd_release - invoked when the (last) close(2) is called on this scsi disk.
1468 * @disk: disk to release
1469 * @mode: FMODE_* mask
1473 * Note: may block (uninterruptible) if error recovery is underway
1476 * Locking: called with bdev->bd_mutex held.
1478 static void sd_release(struct gendisk *disk, fmode_t mode)
1480 struct scsi_disk *sdkp = scsi_disk(disk);
1481 struct scsi_device *sdev = sdkp->device;
1483 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
1485 if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
1486 if (scsi_block_when_processing_errors(sdev))
1487 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1490 scsi_disk_put(sdkp);
1493 static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1495 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1496 struct scsi_device *sdp = sdkp->device;
1497 struct Scsi_Host *host = sdp->host;
1498 sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
1501 /* default to most commonly used values */
1502 diskinfo[0] = 0x40; /* 1 << 6 */
1503 diskinfo[1] = 0x20; /* 1 << 5 */
1504 diskinfo[2] = capacity >> 11;
1506 /* override with calculated, extended default, or driver values */
1507 if (host->hostt->bios_param)
1508 host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
1510 scsicam_bios_param(bdev, capacity, diskinfo);
1512 geo->heads = diskinfo[0];
1513 geo->sectors = diskinfo[1];
1514 geo->cylinders = diskinfo[2];
1519 * sd_ioctl - process an ioctl
1520 * @bdev: target block device
1521 * @mode: FMODE_* mask
1522 * @cmd: ioctl command number
1523 * @p: this is third argument given to ioctl(2) system call.
1524 * Often contains a pointer.
1526 * Returns 0 if successful (some ioctls return positive numbers on
1527 * success as well). Returns a negated errno value in case of error.
1529 * Note: most ioctls are forwarded onto the block subsystem or further
1530 * down in the scsi subsystem.
1532 static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
1533 unsigned int cmd, void __user *p)
1535 struct gendisk *disk = bdev->bd_disk;
1536 struct scsi_disk *sdkp = scsi_disk(disk);
1537 struct scsi_device *sdp = sdkp->device;
1540 SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
1541 "cmd=0x%x\n", disk->disk_name, cmd));
1543 error = scsi_verify_blk_ioctl(bdev, cmd);
1548 * If we are in the middle of error recovery, don't let anyone
1549 * else try and use this device. Also, if error recovery fails, it
1550 * may try and take the device offline, in which case all further
1551 * access to the device is prohibited.
1553 error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
1554 (mode & FMODE_NDELAY) != 0);
1558 if (is_sed_ioctl(cmd))
1559 return sed_ioctl(sdkp->opal_dev, cmd, p);
1562 * Send SCSI addressing ioctls directly to mid level, send other
1563 * ioctls to block level and then onto mid level if they can't be resolved. */
1567 case SCSI_IOCTL_GET_IDLUN:
1568 case SCSI_IOCTL_GET_BUS_NUMBER:
1569 error = scsi_ioctl(sdp, cmd, p);
1572 error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
1579 static void set_media_not_present(struct scsi_disk *sdkp)
1581 if (sdkp->media_present)
1582 sdkp->device->changed = 1;
1584 if (sdkp->device->removable) {
1585 sdkp->media_present = 0;
1590 static int media_not_present(struct scsi_disk *sdkp,
1591 struct scsi_sense_hdr *sshdr)
1593 if (!scsi_sense_valid(sshdr))
1596 /* not invoked for commands that could return deferred errors */
1597 switch (sshdr->sense_key) {
1598 case UNIT_ATTENTION:
1600 /* medium not present */
1601 if (sshdr->asc == 0x3A) {
1602 set_media_not_present(sdkp);
1610 * sd_check_events - check media events
1611 * @disk: kernel device descriptor
1612 * @clearing: disk events currently being cleared
1614 * Returns mask of DISK_EVENT_*.
1616 * Note: this function is invoked from the block subsystem.
1618 static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1620 struct scsi_disk *sdkp = scsi_disk_get(disk);
1621 struct scsi_device *sdp;
1628 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
1631 * If the device is offline, don't send any commands - just pretend as
1632 * if the command failed. If the device ever comes back online, we
1633 * can deal with it then. It is only because of unrecoverable errors
1634 * that we would ever take a device offline in the first place.
1636 if (!scsi_device_online(sdp)) {
1637 set_media_not_present(sdkp);
1642 * Using TEST_UNIT_READY enables differentiation between drive with
1643 * no cartridge loaded - NOT READY, drive with changed cartridge -
1644 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
1646 * Drives that auto spin down, e.g. iomega jaz 1G, will be started
1647 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
1648 * sd_revalidate() is called.
1650 if (scsi_block_when_processing_errors(sdp)) {
1651 struct scsi_sense_hdr sshdr = { 0, };
1653 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
1656 /* failed to execute TUR, assume media not present */
1657 if (host_byte(retval)) {
1658 set_media_not_present(sdkp);
1662 if (media_not_present(sdkp, &sshdr))
1667 * For removable scsi disks we have to recognise the presence
1668 * of a disk in the drive.
1670 if (!sdkp->media_present)
1672 sdkp->media_present = 1;
1675 * sdp->changed is set under the following conditions:
1677 * Medium present state has changed in either direction.
1678 * Device has indicated UNIT_ATTENTION.
1680 retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1682 scsi_disk_put(sdkp);
1686 static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
1689 struct scsi_device *sdp = sdkp->device;
1690 const int timeout = sdp->request_queue->rq_timeout
1691 * SD_FLUSH_TIMEOUT_MULTIPLIER;
1692 struct scsi_sense_hdr my_sshdr;
1694 if (!scsi_device_online(sdp))
1697 /* caller might not be interested in sense, but we need it */
1701 for (retries = 3; retries > 0; --retries) {
1702 unsigned char cmd[10] = { 0 };
1704 cmd[0] = SYNCHRONIZE_CACHE;
1706 * Leave the rest of the command zero to indicate flush everything. */
1709 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
1710 timeout, sdkp->max_retries, 0, RQF_PM, NULL);
1716 sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1718 if (driver_byte(res) == DRIVER_SENSE)
1719 sd_print_sense_hdr(sdkp, sshdr);
1721 /* we need to evaluate the error return */
1722 if (scsi_sense_valid(sshdr) &&
1723 (sshdr->asc == 0x3a || /* medium not present */
1724 sshdr->asc == 0x20 || /* invalid command */
1725 (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
1726 /* this is no error here */
1729 switch (host_byte(res)) {
1730 /* ignore errors due to racing a disconnection */
1731 case DID_BAD_TARGET:
1732 case DID_NO_CONNECT:
1734 /* signal the upper layer it might try again */
1738 case DID_SOFT_ERROR:
1747 static void sd_rescan(struct device *dev)
1749 struct scsi_disk *sdkp = dev_get_drvdata(dev);
1752 ret = sd_revalidate_disk(sdkp->disk);
1753 revalidate_disk_size(sdkp->disk, ret == 0);
1756 static int sd_ioctl(struct block_device *bdev, fmode_t mode,
1757 unsigned int cmd, unsigned long arg)
1759 void __user *p = (void __user *)arg;
1762 ret = sd_ioctl_common(bdev, mode, cmd, p);
1766 return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
1769 #ifdef CONFIG_COMPAT
1770 static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
1771 unsigned int cmd, unsigned long arg)
1773 void __user *p = compat_ptr(arg);
1776 ret = sd_ioctl_common(bdev, mode, cmd, p);
1780 return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
1784 static char sd_pr_type(enum pr_type type)
1787 case PR_WRITE_EXCLUSIVE:
1789 case PR_EXCLUSIVE_ACCESS:
1791 case PR_WRITE_EXCLUSIVE_REG_ONLY:
1793 case PR_EXCLUSIVE_ACCESS_REG_ONLY:
1795 case PR_WRITE_EXCLUSIVE_ALL_REGS:
1797 case PR_EXCLUSIVE_ACCESS_ALL_REGS:
1804 static int sd_pr_command(struct block_device *bdev, u8 sa,
1805 u64 key, u64 sa_key, u8 type, u8 flags)
1807 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1808 struct scsi_device *sdev = sdkp->device;
1809 struct scsi_sense_hdr sshdr;
1811 u8 cmd[16] = { 0, };
1812 u8 data[24] = { 0, };
1814 cmd[0] = PERSISTENT_RESERVE_OUT;
1817 put_unaligned_be32(sizeof(data), &cmd[5]);
1819 put_unaligned_be64(key, &data[0]);
1820 put_unaligned_be64(sa_key, &data[8]);
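/*
 * PERSISTENT RESERVE OUT parameter list: reservation key in bytes
 * 0-7, service action reservation key in bytes 8-15, flags (e.g.
 * APTPL) in byte 20.
 */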
1823 result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
1824 &sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
1826 if (driver_byte(result) == DRIVER_SENSE &&
1827 scsi_sense_valid(&sshdr)) {
1828 sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
1829 scsi_print_sense_hdr(sdev, NULL, &sshdr);
1835 static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
1838 if (flags & ~PR_FL_IGNORE_KEY)
1840 return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
1841 old_key, new_key, 0,
1842 (1 << 0) /* APTPL */);
1845 static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
1850 return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
1853 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
1855 return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
1858 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
1859 enum pr_type type, bool abort)
1861 return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
1862 sd_pr_type(type), 0);
1865 static int sd_pr_clear(struct block_device *bdev, u64 key)
1867 return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
1870 static const struct pr_ops sd_pr_ops = {
1871 .pr_register = sd_pr_register,
1872 .pr_reserve = sd_pr_reserve,
1873 .pr_release = sd_pr_release,
1874 .pr_preempt = sd_pr_preempt,
1875 .pr_clear = sd_pr_clear,
1878 static const struct block_device_operations sd_fops = {
1879 .owner = THIS_MODULE,
1881 .release = sd_release,
1883 .getgeo = sd_getgeo,
1884 #ifdef CONFIG_COMPAT
1885 .compat_ioctl = sd_compat_ioctl,
1887 .check_events = sd_check_events,
1888 .unlock_native_capacity = sd_unlock_native_capacity,
1889 .report_zones = sd_zbc_report_zones,
1890 .pr_ops = &sd_pr_ops,
1894 * sd_eh_reset - reset error handling callback
1895 * @scmd: sd-issued command that has failed
1897 * This function is called by the SCSI midlayer before starting
1898 * SCSI EH. When counting medium access failures we have to be
1899 * careful to register it only once per device and SCSI EH run;
1900 * there might be several timed out commands which will cause the
1901 * 'max_medium_access_timeouts' counter to trigger already after the
1902 * first SCSI EH run and set the device to offline.
1903 * So this function resets the internal counter before starting SCSI EH.
1905 static void sd_eh_reset(struct scsi_cmnd *scmd)
1907 struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
1909 /* New SCSI EH run, reset gate variable */
1910 sdkp->ignore_medium_access_errors = false;
1914 * sd_eh_action - error handling callback
1915 * @scmd: sd-issued command that has failed
1916 * @eh_disp: The recovery disposition suggested by the midlayer
1918 * This function is called by the SCSI midlayer upon completion of an
1919 * error test command (currently TEST UNIT READY). The result of sending
1920 * the eh command is passed in eh_disp. We're looking for devices that
1921 * fail medium access commands but are OK with non-access commands like
1922 * test unit ready (so wrongly see the device as having a successful recovery). */
1925 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
1927 struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
1928 struct scsi_device *sdev = scmd->device;
1930 if (!scsi_device_online(sdev) ||
1931 !scsi_medium_access_command(scmd) ||
1932 host_byte(scmd->result) != DID_TIME_OUT ||
1937 * The device has timed out executing a medium access command.
1938 * However, the TEST UNIT READY command sent during error
1939 * handling completed successfully. Either the device is in the
1940 * process of recovering or it has suffered an internal failure
1941 * that prevents access to the storage medium.
1943 if (!sdkp->ignore_medium_access_errors) {
1944 sdkp->medium_access_timed_out++;
1945 sdkp->ignore_medium_access_errors = true;
1949 * If the device keeps failing read/write commands but TEST UNIT
1950 * READY always completes successfully we assume that medium
1951 * access is no longer possible and take the device offline.
1953 if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
1954 scmd_printk(KERN_ERR, scmd,
1955 "Medium access timeout failure. Offlining disk!\n");
1956 mutex_lock(&sdev->state_mutex);
1957 scsi_device_set_state(sdev, SDEV_OFFLINE);
1958 mutex_unlock(&sdev->state_mutex);
1966 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1968 struct request *req = scmd->request;
1969 struct scsi_device *sdev = scmd->device;
1970 unsigned int transferred, good_bytes;
1971 u64 start_lba, end_lba, bad_lba;
1974 * Some commands have a payload smaller than the device logical
1975 * block size (e.g. INQUIRY on a 4K disk).
1977 if (scsi_bufflen(scmd) <= sdev->sector_size)
1980 /* Check if we have 'bad_lba' information */
1981 if (!scsi_get_sense_info_fld(scmd->sense_buffer,
1982 SCSI_SENSE_BUFFERSIZE,
1987 * If the bad lba was reported incorrectly, we have no idea where the error is. */
1990 start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
1991 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
1992 if (bad_lba < start_lba || bad_lba >= end_lba)
1996 * resid is optional but mostly filled in. When it's unused,
1997 * its value is zero, so we assume the whole buffer transferred
1999 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
2001 /* This computation should always be done in terms of the
2002 * resolution of the device's medium.
2004 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
2006 return min(good_bytes, transferred);
2010 * sd_done - bottom half handler: called when the lower level
2011 * driver has completed (successfully or otherwise) a scsi command.
2012 * @SCpnt: mid-level's per command structure.
2014 * Note: potentially run from within an ISR. Must not block.
2016 static int sd_done(struct scsi_cmnd *SCpnt)
2018 int result = SCpnt->result;
2019 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
2020 unsigned int sector_size = SCpnt->device->sector_size;
2022 struct scsi_sense_hdr sshdr;
2023 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
2024 struct request *req = SCpnt->request;
2025 int sense_valid = 0;
2026 int sense_deferred = 0;
2028 switch (req_op(req)) {
2029 case REQ_OP_DISCARD:
2030 case REQ_OP_WRITE_ZEROES:
2031 case REQ_OP_WRITE_SAME:
2032 case REQ_OP_ZONE_RESET:
2033 case REQ_OP_ZONE_RESET_ALL:
2034 case REQ_OP_ZONE_OPEN:
2035 case REQ_OP_ZONE_CLOSE:
2036 case REQ_OP_ZONE_FINISH:
2038 good_bytes = blk_rq_bytes(req);
2039 scsi_set_resid(SCpnt, 0);
2042 scsi_set_resid(SCpnt, blk_rq_bytes(req));
2047 * In case of bogus fw or device, we could end up having
2048 * an unaligned partial completion. Check this here and force alignment. */
2051 resid = scsi_get_resid(SCpnt);
2052 if (resid & (sector_size - 1)) {
2053 sd_printk(KERN_INFO, sdkp,
2054 "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
2055 resid, sector_size);
2056 scsi_print_command(SCpnt);
2057 resid = min(scsi_bufflen(SCpnt),
2058 round_up(resid, sector_size));
2059 scsi_set_resid(SCpnt, resid);
2064 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
2066 sense_deferred = scsi_sense_is_deferred(&sshdr);
2068 sdkp->medium_access_timed_out = 0;
2070 if (driver_byte(result) != DRIVER_SENSE &&
2071 (!sense_valid || sense_deferred))
2074 switch (sshdr.sense_key) {
2075 case HARDWARE_ERROR:
2077 good_bytes = sd_completed_bytes(SCpnt);
2079 case RECOVERED_ERROR:
2080 good_bytes = scsi_bufflen(SCpnt);
2083 /* This indicates a false check condition, so ignore it. An
2084 * unknown amount of data was transferred so treat it as an error. */
2088 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2090 case ABORTED_COMMAND:
2091 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
2092 good_bytes = sd_completed_bytes(SCpnt);
2094 case ILLEGAL_REQUEST:
2095 switch (sshdr.asc) {
2096 case 0x10: /* DIX: Host detected corruption */
2097 good_bytes = sd_completed_bytes(SCpnt);
2099 case 0x20: /* INVALID COMMAND OPCODE */
2100 case 0x24: /* INVALID FIELD IN CDB */
2101 switch (SCpnt->cmnd[0]) {
2103 sd_config_discard(sdkp, SD_LBP_DISABLE);
2107 if (SCpnt->cmnd[1] & 8) { /* UNMAP */
2108 sd_config_discard(sdkp, SD_LBP_DISABLE);
2110 sdkp->device->no_write_same = 1;
2111 sd_config_write_same(sdkp);
2112 req->rq_flags |= RQF_QUIET;
2123 if (sd_is_zoned(sdkp))
2124 good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
2126 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
2127 "sd_done: completed %d of %d bytes\n",
2128 good_bytes, scsi_bufflen(SCpnt)));
2134 * spinup disk - called only in sd_revalidate_disk()
2137 sd_spinup_disk(struct scsi_disk *sdkp)
2139 unsigned char cmd[10];
2140 unsigned long spintime_expire = 0;
2141 int retries, spintime;
2142 unsigned int the_result;
2143 struct scsi_sense_hdr sshdr;
2144 int sense_valid = 0;
2148 /* Spin up drives, as required. Only do this at boot time */
2149 /* Spinup needs to be done for module loads too. */
2154 cmd[0] = TEST_UNIT_READY;
2155 memset((void *) &cmd[1], 0, 9);
2157 the_result = scsi_execute_req(sdkp->device, cmd,
2160 sdkp->max_retries, NULL);
2163 * If the drive has indicated to us that it
2164 * doesn't have any media in it, don't bother
2165 * with any more polling.
2167 if (media_not_present(sdkp, &sshdr))
2171 sense_valid = scsi_sense_valid(&sshdr);
2173 } while (retries < 3 &&
2174 (!scsi_status_is_good(the_result) ||
2175 ((driver_byte(the_result) == DRIVER_SENSE) &&
2176 sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
2178 if (driver_byte(the_result) != DRIVER_SENSE) {
2179 /* no sense, TUR either succeeded or failed
2180 * with a status error */
2181 if(!spintime && !scsi_status_is_good(the_result)) {
2182 sd_print_result(sdkp, "Test Unit Ready failed",
2189 * The device does not want the automatic start to be issued.
2191 if (sdkp->device->no_start_on_add)
2194 if (sense_valid && sshdr.sense_key == NOT_READY) {
2195 if (sshdr.asc == 4 && sshdr.ascq == 3)
2196 break; /* manual intervention required */
2197 if (sshdr.asc == 4 && sshdr.ascq == 0xb)
2198 break; /* standby */
2199 if (sshdr.asc == 4 && sshdr.ascq == 0xc)
2200 break; /* unavailable */
2201 if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
2202 break; /* sanitize in progress */
2204 * Issue command to spin up drive when not ready
2207 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
2208 cmd[0] = START_STOP;
2209 cmd[1] = 1; /* Return immediately */
2210 memset((void *) &cmd[2], 0, 8);
2211 cmd[4] = 1; /* Start spin cycle */
2212 if (sdkp->device->start_stop_pwr_cond)
2214 scsi_execute_req(sdkp->device, cmd, DMA_NONE,
2216 SD_TIMEOUT, sdkp->max_retries,
2218 spintime_expire = jiffies + 100 * HZ;
2221 /* Wait 1 second for next try */
2223 printk(KERN_CONT ".");
2226 * Wait for USB flash devices with slow firmware.
2227 * Yes, this sense key/ASC combination shouldn't
2228 * occur here. It's characteristic of these devices.
2230 } else if (sense_valid &&
2231 sshdr.sense_key == UNIT_ATTENTION &&
2232 sshdr.asc == 0x28) {
2234 spintime_expire = jiffies + 5 * HZ;
2237 /* Wait 1 second for next try */
2240 /* we don't understand the sense code, so it's
2241 * probably pointless to loop */
2243 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
2244 sd_print_sense_hdr(sdkp, &sshdr);
2249 } while (spintime && time_before_eq(jiffies, spintime_expire));
2252 if (scsi_status_is_good(the_result))
2253 printk(KERN_CONT "ready\n");
2255 printk(KERN_CONT "not responding...\n");
2260 * Determine whether disk supports Data Integrity Field.
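 *
 * Illustrative decode of READ CAPACITY(16) byte 12, as parsed below
 * (PROT_EN is bit 0, P_TYPE is bits 3:1, and P_TYPE 0 maps to Type 1):
 *   buffer[12] == 0x01  ->  PROT_EN=1, P_TYPE=0  ->  DIF Type 1
 *   buffer[12] == 0x03  ->  PROT_EN=1, P_TYPE=1  ->  DIF Type 2
 *   buffer[12] == 0x00  ->  PROT_EN=0            ->  no protection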
2262 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
2264 struct scsi_device *sdp = sdkp->device;
2268 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
2269 sdkp->protection_type = 0;
2273 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
2275 if (type > T10_PI_TYPE3_PROTECTION)
2277 else if (scsi_host_dif_capable(sdp->host, type))
2280 if (sdkp->first_scan || type != sdkp->protection_type)
2283 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
2284 " protection type %u. Disabling disk!\n",
2288 sd_printk(KERN_NOTICE, sdkp,
2289 "Enabling DIF Type %u protection\n", type);
2292 sd_printk(KERN_NOTICE, sdkp,
2293 "Disabling DIF Type %u protection\n", type);
2297 sdkp->protection_type = type;
2302 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
2303 struct scsi_sense_hdr *sshdr, int sense_valid,
2306 if (driver_byte(the_result) == DRIVER_SENSE)
2307 sd_print_sense_hdr(sdkp, sshdr);
2309 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
2312 * Set dirty bit for removable devices if not ready -
2313 * sometimes drives will not report this properly.
2315 if (sdp->removable &&
2316 sense_valid && sshdr->sense_key == NOT_READY)
2317 set_media_not_present(sdkp);
2320 * We used to set media_present to 0 here to indicate no media
2321 * in the drive, but some drives fail read capacity even with
2322 * media present, so we can't do that.
2324 sdkp->capacity = 0; /* unknown mapped to zero - as usual */
2328 #if RC16_LEN > SD_BUF_SIZE
2329 #error RC16_LEN must not be more than SD_BUF_SIZE
2332 #define READ_CAPACITY_RETRIES_ON_RESET 10
2334 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2335 unsigned char *buffer)
2337 unsigned char cmd[16];
2338 struct scsi_sense_hdr sshdr;
2339 int sense_valid = 0;
2341 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2342 unsigned int alignment;
2343 unsigned long long lba;
2344 unsigned sector_size;
2346 if (sdp->no_read_capacity_16)
2351 cmd[0] = SERVICE_ACTION_IN_16;
2352 cmd[1] = SAI_READ_CAPACITY_16;
2354 memset(buffer, 0, RC16_LEN);
2356 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
2357 buffer, RC16_LEN, &sshdr,
2358 SD_TIMEOUT, sdkp->max_retries, NULL);
2360 if (media_not_present(sdkp, &sshdr))
2364 sense_valid = scsi_sense_valid(&sshdr);
2366 sshdr.sense_key == ILLEGAL_REQUEST &&
2367 (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
2369 /* Invalid Command Operation Code or
2370 * Invalid Field in CDB, just retry
2371 * silently with RC10 */
2374 sshdr.sense_key == UNIT_ATTENTION &&
2375 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2376 /* Device reset might occur several times,
2377 * give it one more chance */
2378 if (--reset_retries > 0)
2383 } while (the_result && retries);
2386 sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
2387 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2391 sector_size = get_unaligned_be32(&buffer[8]);
2392 lba = get_unaligned_be64(&buffer[0]);
2394 if (sd_read_protection_type(sdkp, buffer) < 0) {
2399 /* Logical blocks per physical block exponent */
2400 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
2403 sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
2405 /* Lowest aligned logical block */
2406 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
2407 blk_queue_alignment_offset(sdp->request_queue, alignment);
2408 if (alignment && sdkp->first_scan)
2409 sd_printk(KERN_NOTICE, sdkp,
2410 "physical block alignment offset: %u\n", alignment);
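/* Illustrative arithmetic for the computation above: a lowest aligned
 * logical block value of 1 with 512-byte logical blocks yields an
 * alignment offset of 512 bytes; a value of 0 means LBA 0 already
 * starts on a physical block boundary. */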
2412 if (buffer[14] & 0x80) { /* LBPME */
2415 if (buffer[14] & 0x40) /* LBPRZ */
2418 sd_config_discard(sdkp, SD_LBP_WS16);
2421 sdkp->capacity = lba + 1;
2425 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2426 unsigned char *buffer)
2428 unsigned char cmd[16];
2429 struct scsi_sense_hdr sshdr;
2430 int sense_valid = 0;
2432 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2434 unsigned sector_size;
2437 cmd[0] = READ_CAPACITY;
2438 memset(&cmd[1], 0, 9);
2439 memset(buffer, 0, 8);
2441 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
2443 SD_TIMEOUT, sdkp->max_retries, NULL);
2445 if (media_not_present(sdkp, &sshdr))
2449 sense_valid = scsi_sense_valid(&sshdr);
2451 sshdr.sense_key == UNIT_ATTENTION &&
2452 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2453 /* Device reset might occur several times,
2454 * give it one more chance */
2455 if (--reset_retries > 0)
2460 } while (the_result && retries);
2463 sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
2464 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2468 sector_size = get_unaligned_be32(&buffer[4]);
2469 lba = get_unaligned_be32(&buffer[0]);
2471 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
2472 /* Some buggy (USB card reader) devices return an lba of
2473 0xffffffff when they want to report a size of 0 (with
2474 which they really mean no media is present) */
2476 sdkp->physical_block_size = sector_size;
2480 sdkp->capacity = lba + 1;
2481 sdkp->physical_block_size = sector_size;
2485 static int sd_try_rc16_first(struct scsi_device *sdp)
2487 if (sdp->host->max_cmd_len < 16)
2489 if (sdp->try_rc_10_first)
2491 if (sdp->scsi_level > SCSI_SPC_2)
2493 if (scsi_device_protection(sdp))
2499 * read disk capacity
2502 sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
2505 struct scsi_device *sdp = sdkp->device;
2507 if (sd_try_rc16_first(sdp)) {
2508 sector_size = read_capacity_16(sdkp, sdp, buffer);
2509 if (sector_size == -EOVERFLOW)
2511 if (sector_size == -ENODEV)
2513 if (sector_size < 0)
2514 sector_size = read_capacity_10(sdkp, sdp, buffer);
2515 if (sector_size < 0)
2518 sector_size = read_capacity_10(sdkp, sdp, buffer);
2519 if (sector_size == -EOVERFLOW)
2521 if (sector_size < 0)
2523 if ((sizeof(sdkp->capacity) > 4) &&
2524 (sdkp->capacity > 0xffffffffULL)) {
2525 int old_sector_size = sector_size;
2526 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
2527 "Trying to use READ CAPACITY(16).\n");
2528 sector_size = read_capacity_16(sdkp, sdp, buffer);
2529 if (sector_size < 0) {
2530 sd_printk(KERN_NOTICE, sdkp,
2531 "Using 0xffffffff as device size\n");
2532 sdkp->capacity = 1 + (sector_t) 0xffffffff;
2533 sector_size = old_sector_size;
2536 /* Remember that READ CAPACITY(16) succeeded */
2537 sdp->try_rc_10_first = 0;
2541 /* Some devices are known to return the total number of blocks,
2542 * not the highest block number. Some devices have versions
2543 * which do this and others which do not. Some devices we might
2544 * suspect of doing this but we don't know for certain.
2546 * If we know the reported capacity is wrong, decrement it. If
2547 * we can only guess, then assume the number of blocks is even
2548 (usually true but not always) and err on the side of lowering the capacity.
2551 if (sdp->fix_capacity ||
2552 (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
2553 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
2554 "from its reported value: %llu\n",
2555 (unsigned long long) sdkp->capacity);
2560 if (sector_size == 0) {
2562 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
2566 if (sector_size != 512 &&
2567 sector_size != 1024 &&
2568 sector_size != 2048 &&
2569 sector_size != 4096) {
2570 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
2573 * The user might want to re-format the drive with
2574 * a supported sector size. Once this happens, it
2575 * would be relatively trivial to set the device up.
2576 * For this reason, we leave the device in the table.
2580 * set a bogus sector size so the normal read/write
2581 * logic in the block layer will eventually refuse any
2582 * request on this device without tripping over power
2583 * of two sector size assumptions
2587 blk_queue_logical_block_size(sdp->request_queue, sector_size);
2588 blk_queue_physical_block_size(sdp->request_queue,
2589 sdkp->physical_block_size);
2590 sdkp->device->sector_size = sector_size;
2592 if (sdkp->capacity > 0xffffffff)
2593 sdp->use_16_for_rw = 1;
2598 * Print disk capacity
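 *
 * Example of the resulting log line (illustrative values):
 *   "3907029168 512-byte logical blocks: (2.00 TB/1.82 TiB)"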
2601 sd_print_capacity(struct scsi_disk *sdkp,
2602 sector_t old_capacity)
2604 int sector_size = sdkp->device->sector_size;
2605 char cap_str_2[10], cap_str_10[10];
2607 if (!sdkp->first_scan && old_capacity == sdkp->capacity)
2610 string_get_size(sdkp->capacity, sector_size,
2611 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
2612 string_get_size(sdkp->capacity, sector_size,
2613 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
2615 sd_printk(KERN_NOTICE, sdkp,
2616 "%llu %d-byte logical blocks: (%s/%s)\n",
2617 (unsigned long long)sdkp->capacity,
2618 sector_size, cap_str_10, cap_str_2);
2620 if (sdkp->physical_block_size != sector_size)
2621 sd_printk(KERN_NOTICE, sdkp,
2622 "%u-byte physical blocks\n",
2623 sdkp->physical_block_size);
2626 /* called with buffer of length 512 */
2628 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
2629 unsigned char *buffer, int len, struct scsi_mode_data *data,
2630 struct scsi_sense_hdr *sshdr)
2632 return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
2633 SD_TIMEOUT, sdkp->max_retries, data,
2638 * read write protect setting, if possible - called only in sd_revalidate_disk()
2639 * called with buffer of length SD_BUF_SIZE
2642 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2645 struct scsi_device *sdp = sdkp->device;
2646 struct scsi_mode_data data;
2647 int old_wp = sdkp->write_prot;
2649 set_disk_ro(sdkp->disk, 0);
2650 if (sdp->skip_ms_page_3f) {
2651 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
2655 if (sdp->use_192_bytes_for_3f) {
2656 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
2659 * First attempt: ask for all pages (0x3F), but only 4 bytes.
2660 * We have to start carefully: some devices hang if we ask
2661 * for more than is available.
2663 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
2666 * Second attempt: ask for page 0. When only page 0 is
2667 * implemented, a request for page 3F may return Sense Key
2668 * 5: Illegal Request, Sense Code 24: Invalid field in
2671 if (!scsi_status_is_good(res))
2672 res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
2675 * Third attempt: ask for 255 bytes, as we did earlier.
2677 if (!scsi_status_is_good(res))
2678 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
2682 if (!scsi_status_is_good(res)) {
2683 sd_first_printk(KERN_WARNING, sdkp,
2684 "Test WP failed, assume Write Enabled\n");
2686 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
2687 set_disk_ro(sdkp->disk, sdkp->write_prot);
2688 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2689 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2690 sdkp->write_prot ? "on" : "off");
2691 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
2697 * sd_read_cache_type - called only from sd_revalidate_disk()
2698 * called with buffer of length SD_BUF_SIZE
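 *
 * For reference, the bits decoded below come from byte 2 of the
 * Caching mode page (0x08): bit 2 is WCE and bit 0 is RCD. An
 * illustrative value of 0x04 therefore means the write cache is
 * enabled and the read cache is not disabled.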
2701 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2704 struct scsi_device *sdp = sdkp->device;
2709 struct scsi_mode_data data;
2710 struct scsi_sense_hdr sshdr;
2711 int old_wce = sdkp->WCE;
2712 int old_rcd = sdkp->RCD;
2713 int old_dpofua = sdkp->DPOFUA;
2716 if (sdkp->cache_override)
2720 if (sdp->skip_ms_page_8) {
2721 if (sdp->type == TYPE_RBC)
2724 if (sdp->skip_ms_page_3f)
2727 if (sdp->use_192_bytes_for_3f)
2731 } else if (sdp->type == TYPE_RBC) {
2739 /* cautiously ask */
2740 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
2743 if (!scsi_status_is_good(res))
2746 if (!data.header_length) {
2749 sd_first_printk(KERN_ERR, sdkp,
2750 "Missing header in MODE_SENSE response\n");
2753 /* that went OK, now ask for the proper length */
2757 * We're only interested in the first three bytes, actually.
2758 * But the data cache page is defined for the first 20.
2762 else if (len > SD_BUF_SIZE) {
2763 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
2764 "data from %d to %d bytes\n", len, SD_BUF_SIZE);
2767 if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
2771 if (len > first_len)
2772 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
2775 if (scsi_status_is_good(res)) {
2776 int offset = data.header_length + data.block_descriptor_length;
2778 while (offset < len) {
2779 u8 page_code = buffer[offset] & 0x3F;
2780 u8 spf = buffer[offset] & 0x40;
2782 if (page_code == 8 || page_code == 6) {
2783 /* We're interested only in the first 3 bytes.
2785 if (len - offset <= 2) {
2786 sd_first_printk(KERN_ERR, sdkp,
2787 "Incomplete mode parameter "
2791 modepage = page_code;
2795 /* Go to the next page */
2796 if (spf && len - offset > 3)
2797 offset += 4 + (buffer[offset+2] << 8) +
2799 else if (!spf && len - offset > 1)
2800 offset += 2 + buffer[offset+1];
2802 sd_first_printk(KERN_ERR, sdkp,
2804 "parameter data\n");
2810 sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
2814 if (modepage == 8) {
2815 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2816 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
2818 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
2822 sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
2823 if (sdp->broken_fua) {
2824 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2826 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
2827 !sdkp->device->use_16_for_rw) {
2828 sd_first_printk(KERN_NOTICE, sdkp,
2829 "Uses READ/WRITE(6), disabling FUA\n");
2833 /* No cache flush allowed for write protected devices */
2834 if (sdkp->WCE && sdkp->write_prot)
2837 if (sdkp->first_scan || old_wce != sdkp->WCE ||
2838 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
2839 sd_printk(KERN_NOTICE, sdkp,
2840 "Write cache: %s, read cache: %s, %s\n",
2841 sdkp->WCE ? "enabled" : "disabled",
2842 sdkp->RCD ? "disabled" : "enabled",
2843 sdkp->DPOFUA ? "supports DPO and FUA"
2844 : "doesn't support DPO or FUA");
2850 if (scsi_sense_valid(&sshdr) &&
2851 sshdr.sense_key == ILLEGAL_REQUEST &&
2852 sshdr.asc == 0x24 && sshdr.ascq == 0x0)
2853 /* Invalid field in CDB */
2854 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
2856 sd_first_printk(KERN_ERR, sdkp,
2857 "Asking for cache data failed\n");
2860 if (sdp->wce_default_on) {
2861 sd_first_printk(KERN_NOTICE, sdkp,
2862 "Assuming drive cache: write back\n");
2865 sd_first_printk(KERN_ERR, sdkp,
2866 "Assuming drive cache: write through\n");
2874 * The ATO bit indicates whether the DIF application tag is available
2875 * for use by the operating system.
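 *
 * The check below reads the Control mode page (0x0a); the ATO flag is
 * bit 7 of byte 5 of that page, so e.g. a byte value of 0x80 means the
 * application tag is available to the OS.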
2877 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
2880 struct scsi_device *sdp = sdkp->device;
2881 struct scsi_mode_data data;
2882 struct scsi_sense_hdr sshdr;
2884 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
2887 if (sdkp->protection_type == 0)
2890 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
2891 sdkp->max_retries, &data, &sshdr);
2893 if (!scsi_status_is_good(res) || !data.header_length ||
2895 sd_first_printk(KERN_WARNING, sdkp,
2896 "getting Control mode page failed, assume no ATO\n");
2898 if (scsi_sense_valid(&sshdr))
2899 sd_print_sense_hdr(sdkp, &sshdr);
2904 offset = data.header_length + data.block_descriptor_length;
2906 if ((buffer[offset] & 0x3f) != 0x0a) {
2907 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
2911 if ((buffer[offset + 5] & 0x80) == 0)
2920 * sd_read_block_limits - Query disk device for preferred I/O sizes.
2921 * @sdkp: disk to query
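 *
 * For reference, the Block Limits VPD page (0xb0) fields read below:
 *   bytes  6-7   optimal transfer length granularity (io_min)
 *   bytes  8-11  maximum transfer length (max_xfer_blocks)
 *   bytes 12-15  optimal transfer length (opt_xfer_blocks)
 *   bytes 20-23  maximum unmap LBA count
 *   bytes 24-27  maximum unmap block descriptor count
 *   bytes 28-31  optimal unmap granularity
 *   bytes 32-35  unmap granularity alignment (top bit: alignment valid)
 *   bytes 36-43  maximum write same length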
2923 static void sd_read_block_limits(struct scsi_disk *sdkp)
2925 unsigned int sector_sz = sdkp->device->sector_size;
2926 const int vpd_len = 64;
2927 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
2930 /* Block Limits VPD */
2931 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
2934 blk_queue_io_min(sdkp->disk->queue,
2935 get_unaligned_be16(&buffer[6]) * sector_sz);
2937 sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
2938 sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
2940 if (buffer[3] == 0x3c) {
2941 unsigned int lba_count, desc_count;
2943 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
2948 lba_count = get_unaligned_be32(&buffer[20]);
2949 desc_count = get_unaligned_be32(&buffer[24]);
2951 if (lba_count && desc_count)
2952 sdkp->max_unmap_blocks = lba_count;
2954 sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
2956 if (buffer[32] & 0x80)
2957 sdkp->unmap_alignment =
2958 get_unaligned_be32(&buffer[32]) & ~(1 << 31);
2960 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
2962 if (sdkp->max_unmap_blocks)
2963 sd_config_discard(sdkp, SD_LBP_UNMAP);
2965 sd_config_discard(sdkp, SD_LBP_WS16);
2967 } else { /* LBP VPD page tells us what to use */
2968 if (sdkp->lbpu && sdkp->max_unmap_blocks)
2969 sd_config_discard(sdkp, SD_LBP_UNMAP);
2970 else if (sdkp->lbpws)
2971 sd_config_discard(sdkp, SD_LBP_WS16);
2972 else if (sdkp->lbpws10)
2973 sd_config_discard(sdkp, SD_LBP_WS10);
2975 sd_config_discard(sdkp, SD_LBP_DISABLE);
2984 * sd_read_block_characteristics - Query block dev. characteristics
2985 * @sdkp: disk to query
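 *
 * The fields used below from the Block Device Characteristics VPD
 * page (0xb1): bytes 4-5 hold the medium rotation rate (a value of 1
 * means non-rotational), and bits 5:4 of byte 8 hold the ZONED field
 * (1 = host-aware, 2 = drive-managed).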
2987 static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2989 struct request_queue *q = sdkp->disk->queue;
2990 unsigned char *buffer;
2992 const int vpd_len = 64;
2994 buffer = kmalloc(vpd_len, GFP_KERNEL);
2997 /* Block Device Characteristics VPD */
2998 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
3001 rot = get_unaligned_be16(&buffer[4]);
3004 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
3005 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
3008 if (sdkp->device->type == TYPE_ZBC) {
3010 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
3012 sdkp->zoned = (buffer[8] >> 4) & 3;
3013 if (sdkp->zoned == 1) {
3015 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
3017 /* Regular disk or drive managed disk */
3018 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
3022 if (!sdkp->first_scan)
3025 if (blk_queue_is_zoned(q)) {
3026 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
3027 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
3029 if (sdkp->zoned == 1)
3030 sd_printk(KERN_NOTICE, sdkp,
3031 "Host-aware SMR disk used as regular disk\n");
3032 else if (sdkp->zoned == 2)
3033 sd_printk(KERN_NOTICE, sdkp,
3034 "Drive-managed SMR disk\n");
3042 * sd_read_block_provisioning - Query provisioning VPD page
3043 * @sdkp: disk to query
3045 static void sd_read_block_provisioning(struct scsi_disk *sdkp)
3047 unsigned char *buffer;
3048 const int vpd_len = 8;
3050 if (sdkp->lbpme == 0)
3053 buffer = kmalloc(vpd_len, GFP_KERNEL);
3055 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
3059 sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
3060 sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
3061 sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
3067 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
3069 struct scsi_device *sdev = sdkp->device;
3071 if (sdev->host->no_write_same) {
3072 sdev->no_write_same = 1;
3077 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
3078 /* too large values might cause issues with arcmsr */
3079 int vpd_buf_len = 64;
3081 sdev->no_report_opcodes = 1;
3083 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
3084 * CODES is unsupported and the device has an ATA
3085 * Information VPD page (SAT).
3087 if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
3088 sdev->no_write_same = 1;
3091 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
3094 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
3098 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
3100 struct scsi_device *sdev = sdkp->device;
3102 if (!sdev->security_supported)
3105 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3106 SECURITY_PROTOCOL_IN) == 1 &&
3107 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3108 SECURITY_PROTOCOL_OUT) == 1)
3113 * Determine the device's preferred I/O size for reads and writes
3114 * unless the reported value is unreasonably small, large, not a
3115 * multiple of the physical block size, or simply garbage.
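 *
 * Worked example with illustrative numbers: opt_xfer_blocks = 65535
 * with 512-byte logical blocks and a 4096-byte physical block size
 * gives 33553920 bytes, which is not a multiple of 4096, so the
 * reported value is rejected and the caller sets io_opt to zero.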
3117 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3118 unsigned int dev_max)
3120 struct scsi_device *sdp = sdkp->device;
3121 unsigned int opt_xfer_bytes =
3122 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3124 if (sdkp->opt_xfer_blocks == 0)
3127 if (sdkp->opt_xfer_blocks > dev_max) {
3128 sd_first_printk(KERN_WARNING, sdkp,
3129 "Optimal transfer size %u logical blocks " \
3130 "> dev_max (%u logical blocks)\n",
3131 sdkp->opt_xfer_blocks, dev_max);
3135 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
3136 sd_first_printk(KERN_WARNING, sdkp,
3137 "Optimal transfer size %u logical blocks " \
3138 "> sd driver limit (%u logical blocks)\n",
3139 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
3143 if (opt_xfer_bytes < PAGE_SIZE) {
3144 sd_first_printk(KERN_WARNING, sdkp,
3145 "Optimal transfer size %u bytes < " \
3146 "PAGE_SIZE (%u bytes)\n",
3147 opt_xfer_bytes, (unsigned int)PAGE_SIZE);
3151 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
3152 sd_first_printk(KERN_WARNING, sdkp,
3153 "Optimal transfer size %u bytes not a " \
3154 "multiple of physical block size (%u bytes)\n",
3155 opt_xfer_bytes, sdkp->physical_block_size);
3159 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
3165 * sd_revalidate_disk - called the first time a new disk is seen,
3166 * performs disk spin up, read_capacity, etc.
3167 * @disk: struct gendisk we care about
3169 static int sd_revalidate_disk(struct gendisk *disk)
3171 struct scsi_disk *sdkp = scsi_disk(disk);
3172 struct scsi_device *sdp = sdkp->device;
3173 struct request_queue *q = sdkp->disk->queue;
3174 sector_t old_capacity = sdkp->capacity;
3175 unsigned char *buffer;
3176 unsigned int dev_max, rw_max;
3178 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
3179 "sd_revalidate_disk\n"));
3182 * If the device is offline, don't try to read capacity or any
3183 * of the other niceties.
3185 if (!scsi_device_online(sdp))
3188 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
3190 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
3191 "allocation failure.\n");
3195 sd_spinup_disk(sdkp);
3198 * Without media there is no reason to ask; moreover, some devices
3199 * react badly if we do.
3201 if (sdkp->media_present) {
3202 sd_read_capacity(sdkp, buffer);
3205 * set the default to rotational. All non-rotational devices
3206 * support the block characteristics VPD page, which will
3207 * cause this to be updated correctly and any device which
3208 * doesn't support it should be treated as rotational.
3210 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
3211 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
3213 if (scsi_device_supports_vpd(sdp)) {
3214 sd_read_block_provisioning(sdkp);
3215 sd_read_block_limits(sdkp);
3216 sd_read_block_characteristics(sdkp);
3217 sd_zbc_read_zones(sdkp, buffer);
3220 sd_print_capacity(sdkp, old_capacity);
3222 sd_read_write_protect_flag(sdkp, buffer);
3223 sd_read_cache_type(sdkp, buffer);
3224 sd_read_app_tag_own(sdkp, buffer);
3225 sd_read_write_same(sdkp, buffer);
3226 sd_read_security(sdkp, buffer);
3230 * We now have all cache related info, determine how we deal
3231 * with flush requests.
3233 sd_set_flush_flag(sdkp);
3235 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
3236 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
3238 /* Some devices report a maximum block count for READ/WRITE requests. */
3239 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
3240 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3242 if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3243 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3244 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
3246 q->limits.io_opt = 0;
3247 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3248 (sector_t)BLK_DEF_MAX_SECTORS);
3251 /* Do not exceed controller limit */
3252 rw_max = min(rw_max, queue_max_hw_sectors(q));
3255 * Only update max_sectors if previously unset or if the current value
3256 * exceeds the capabilities of the hardware.
3258 if (sdkp->first_scan ||
3259 q->limits.max_sectors > q->limits.max_dev_sectors ||
3260 q->limits.max_sectors > q->limits.max_hw_sectors)
3261 q->limits.max_sectors = rw_max;
3263 sdkp->first_scan = 0;
3265 set_capacity_revalidate_and_notify(disk,
3266 logical_to_sectors(sdp, sdkp->capacity), false);
3267 sd_config_write_same(sdkp);
3271 * For a zoned drive, revalidating the zones can be done only once
3272 * the gendisk capacity is set. So if this fails, set back the gendisk
3275 if (sd_zbc_revalidate_zones(sdkp))
3276 set_capacity_revalidate_and_notify(disk, 0, false);
3283 * sd_unlock_native_capacity - unlock native capacity
3284 * @disk: struct gendisk to set capacity for
3286 * Block layer calls this function if it detects that partitions
3287 * on @disk reach beyond the end of the device. If the SCSI host
3288 * implements ->unlock_native_capacity() method, it's invoked to
3289 * give it a chance to adjust the device capacity.
3292 * Defined by block layer. Might sleep.
3294 static void sd_unlock_native_capacity(struct gendisk *disk)
3296 struct scsi_device *sdev = scsi_disk(disk)->device;
3298 if (sdev->host->hostt->unlock_native_capacity)
3299 sdev->host->hostt->unlock_native_capacity(sdev);
3303 * sd_format_disk_name - format disk name
3304 * @prefix: name prefix - ie. "sd" for SCSI disks
3305 * @index: index of the disk to format name for
3306 * @buf: output buffer
3307 * @buflen: length of the output buffer
3309 * SCSI disk names start at sda. The 26th device is sdz and the
3310 * 27th is sdaa. The last two-letter suffix is sdzz, which is
3311 * followed by sdaaa.
3313 * This is essentially base-26 counting with one extra 'nil' entry
3314 * for every digit after the first, and it can be computed like a
3315 * base-26 conversion with the index decremented by one after
3316 * each digit is produced.
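 *
 * Illustrative mapping: index 0 -> "sda", 25 -> "sdz", 26 -> "sdaa",
 * 701 -> "sdzz", 702 -> "sdaaa".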
3322 * 0 on success, -errno on failure.
3324 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
3326 const int base = 'z' - 'a' + 1;
3327 char *begin = buf + strlen(prefix);
3328 char *end = buf + buflen;
3338 *--p = 'a' + (index % unit);
3339 index = (index / unit) - 1;
3340 } while (index >= 0);
3342 memmove(begin, p, end - p);
3343 memcpy(buf, prefix, strlen(prefix));
3349 * sd_probe - called during driver initialization and whenever a
3350 * new scsi device is attached to the system. It is called once
3351 * for each scsi device (not just disks) present.
3352 * @dev: pointer to device object
3354 * Returns 0 if successful (or not interested in this scsi device
3355 * (e.g. scanner)); 1 when there is an error.
3357 * Note: this function is invoked from the scsi mid-level.
3358 * This function sets up the mapping between a given
3359 * <host,channel,id,lun> (found in sdp) and new device name
3360 * (e.g. /dev/sda). More precisely it is the block device major
3361 * and minor number that is chosen here.
3363 * Assume sd_probe is not re-entrant (for the time being).
3364 * Also consider sd_probe() and sd_remove() running concurrently.
3366 static int sd_probe(struct device *dev)
3368 struct scsi_device *sdp = to_scsi_device(dev);
3369 struct scsi_disk *sdkp;
3374 scsi_autopm_get_device(sdp);
3376 if (sdp->type != TYPE_DISK &&
3377 sdp->type != TYPE_ZBC &&
3378 sdp->type != TYPE_MOD &&
3379 sdp->type != TYPE_RBC)
3382 #ifndef CONFIG_BLK_DEV_ZONED
3383 if (sdp->type == TYPE_ZBC)
3386 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
3390 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
3394 gd = alloc_disk(SD_MINORS);
3398 index = ida_alloc(&sd_index_ida, GFP_KERNEL);
3400 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
3404 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
3406 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
3407 goto out_free_index;
3411 sdkp->driver = &sd_template;
3413 sdkp->index = index;
3414 sdkp->max_retries = SD_MAX_RETRIES;
3415 atomic_set(&sdkp->openers, 0);
3416 atomic_set(&sdkp->device->ioerr_cnt, 0);
3418 if (!sdp->request_queue->rq_timeout) {
3419 if (sdp->type != TYPE_MOD)
3420 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
3422 blk_queue_rq_timeout(sdp->request_queue,
3426 device_initialize(&sdkp->dev);
3427 sdkp->dev.parent = dev;
3428 sdkp->dev.class = &sd_disk_class;
3429 dev_set_name(&sdkp->dev, "%s", dev_name(dev));
3431 error = device_add(&sdkp->dev);
3433 goto out_free_index;
3436 dev_set_drvdata(dev, sdkp);
3438 gd->major = sd_major((index & 0xf0) >> 4);
3439 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
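/* Illustrative mapping of the index to device numbers: index 17 gives
 * sd_major((17 & 0xf0) >> 4) == sd_major(1) and a first_minor of
 * ((17 & 0xf) << 4) | (17 & 0xfff00) == 16, i.e. the disk named "sdr". */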
3441 gd->fops = &sd_fops;
3442 gd->private_data = &sdkp->driver;
3443 gd->queue = sdkp->device->request_queue;
3445 /* defaults, until the device tells us otherwise */
3446 sdp->sector_size = 512;
3448 sdkp->media_present = 1;
3449 sdkp->write_prot = 0;
3450 sdkp->cache_override = 0;
3454 sdkp->first_scan = 1;
3455 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
3457 sd_revalidate_disk(gd);
3459 gd->flags = GENHD_FL_EXT_DEVT;
3460 if (sdp->removable) {
3461 gd->flags |= GENHD_FL_REMOVABLE;
3462 gd->events |= DISK_EVENT_MEDIA_CHANGE;
3463 gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
3466 blk_pm_runtime_init(sdp->request_queue, dev);
3467 if (sdp->rpm_autosuspend) {
3468 pm_runtime_set_autosuspend_delay(dev,
3469 sdp->host->hostt->rpm_autosuspend_delay);
3471 device_add_disk(dev, gd, NULL);
3473 sd_dif_config_host(sdkp);
3475 sd_revalidate_disk(gd);
3477 if (sdkp->security) {
3478 sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
3480 sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
3483 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
3484 sdp->removable ? "removable " : "");
3485 scsi_autopm_put_device(sdp);
3490 ida_free(&sd_index_ida, index);
3494 sd_zbc_release_disk(sdkp);
3497 scsi_autopm_put_device(sdp);
3502 * sd_remove - called whenever a scsi disk (previously recognized by
3503 * sd_probe) is detached from the system. It is called (potentially
3504 * multiple times) during sd module unload.
3505 * @dev: pointer to device object
3507 * Note: this function is invoked from the scsi mid-level.
3508 * This function potentially frees up a device name (e.g. /dev/sdc)
3509 * that could be re-used by a subsequent sd_probe().
3510 * This function is not called when the built-in sd driver is "exit-ed".
3512 static int sd_remove(struct device *dev)
3514 struct scsi_disk *sdkp;
3517 sdkp = dev_get_drvdata(dev);
3518 devt = disk_devt(sdkp->disk);
3519 scsi_autopm_get_device(sdkp->device);
3521 async_synchronize_full_domain(&scsi_sd_pm_domain);
3522 device_del(&sdkp->dev);
3523 del_gendisk(sdkp->disk);
3526 free_opal_dev(sdkp->opal_dev);
3528 blk_register_region(devt, SD_MINORS, NULL,
3529 sd_default_probe, NULL, NULL);
3531 mutex_lock(&sd_ref_mutex);
3532 dev_set_drvdata(dev, NULL);
3533 put_device(&sdkp->dev);
3534 mutex_unlock(&sd_ref_mutex);
3540 * scsi_disk_release - Called to free the scsi_disk structure
3541 * @dev: pointer to embedded class device
3543 * sd_ref_mutex must be held when entering this routine. Because it is
3544 * called on the last put, you should always use the scsi_disk_get()/
3545 * scsi_disk_put() helpers, which take the mutex directly, and never
3546 * call put_device() directly.
3548 static void scsi_disk_release(struct device *dev)
3550 struct scsi_disk *sdkp = to_scsi_disk(dev);
3551 struct gendisk *disk = sdkp->disk;
3552 struct request_queue *q = disk->queue;
3554 ida_free(&sd_index_ida, sdkp->index);
3557 * Wait until all requests that are in progress have completed.
3558 * This is necessary to prevent e.g. scsi_end_request() from crashing
3559 * once the disk->private_data pointer has been cleared. Wait from
3560 * inside scsi_disk_release() instead of from sd_release() so that
3561 * freezing and unfreezing the request queue does not affect user space
3562 * I/O when multiple processes open a /dev/sd... node concurrently.
3564 blk_mq_freeze_queue(q);
3565 blk_mq_unfreeze_queue(q);
3567 disk->private_data = NULL;
3569 put_device(&sdkp->device->sdev_gendev);
3571 sd_zbc_release_disk(sdkp);
3576 static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
3578 unsigned char cmd[6] = { START_STOP }; /* START_VALID */
3579 struct scsi_sense_hdr sshdr;
3580 struct scsi_device *sdp = sdkp->device;
3584 cmd[4] |= 1; /* START */
3586 if (sdp->start_stop_pwr_cond)
3587 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */
3589 if (!scsi_device_online(sdp))
3592 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
3593 SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
3595 sd_print_result(sdkp, "Start/Stop Unit failed", res);
3596 if (driver_byte(res) == DRIVER_SENSE)
3597 sd_print_sense_hdr(sdkp, &sshdr);
3598 if (scsi_sense_valid(&sshdr) &&
3599 /* 0x3a is medium not present */
3604 /* SCSI error codes must not go to the generic layer */
3612 * Send a SYNCHRONIZE CACHE instruction down to the device through
3613 * the normal SCSI command structure. Wait for the command to complete.
3616 static void sd_shutdown(struct device *dev)
3618 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3621 return; /* this can happen */
3623 if (pm_runtime_suspended(dev))
3626 if (sdkp->WCE && sdkp->media_present) {
3627 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3628 sd_sync_cache(sdkp, NULL);
3631 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
3632 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
3633 sd_start_stop_device(sdkp, 0);
3637 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3639 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3640 struct scsi_sense_hdr sshdr;
3643 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
3646 if (sdkp->WCE && sdkp->media_present) {
3647 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3648 ret = sd_sync_cache(sdkp, &sshdr);
3651 /* ignore OFFLINE device */
3655 if (!scsi_sense_valid(&sshdr) ||
3656 sshdr.sense_key != ILLEGAL_REQUEST)
3660 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
3661 * doesn't support sync. There's not much to do and
3662 * suspend shouldn't fail.
3668 if (sdkp->device->manage_start_stop) {
3669 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
3670 /* an error is not worth aborting a system sleep */
3671 ret = sd_start_stop_device(sdkp, 0);
3672 if (ignore_stop_errors)
3679 static int sd_suspend_system(struct device *dev)
3681 return sd_suspend_common(dev, true);
3684 static int sd_suspend_runtime(struct device *dev)
3686 return sd_suspend_common(dev, false);
3689 static int sd_resume(struct device *dev)
3691 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3694 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
3697 if (!sdkp->device->manage_start_stop)
3700 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
3701 ret = sd_start_stop_device(sdkp, 1);
3703 opal_unlock_from_suspend(sdkp->opal_dev);
3708 * init_sd - entry point for this driver (both when built in and when loaded as a module).
3711 * Note: this function registers this driver with the scsi mid-level.
3713 static int __init init_sd(void)
3715 int majors = 0, i, err;
3717 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
3719 for (i = 0; i < SD_MAJORS; i++) {
3720 if (register_blkdev(sd_major(i), "sd") != 0)
3723 blk_register_region(sd_major(i), SD_MINORS, NULL,
3724 sd_default_probe, NULL, NULL);
3730 err = class_register(&sd_disk_class);
3734 sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
3736 if (!sd_cdb_cache) {
3737 printk(KERN_ERR "sd: can't init extended cdb cache\n");
3742 sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
3744 printk(KERN_ERR "sd: can't init extended cdb pool\n");
3749 sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
3750 if (!sd_page_pool) {
3751 printk(KERN_ERR "sd: can't init discard page pool\n");
3756 err = scsi_register_driver(&sd_template.gendrv);
3758 goto err_out_driver;
3763 mempool_destroy(sd_page_pool);
3766 mempool_destroy(sd_cdb_pool);
3769 kmem_cache_destroy(sd_cdb_cache);
3772 class_unregister(&sd_disk_class);
3774 for (i = 0; i < SD_MAJORS; i++)
3775 unregister_blkdev(sd_major(i), "sd");
3780 * exit_sd - exit point for this driver (when it is a module).
3782 * Note: this function unregisters this driver from the scsi mid-level.
3784 static void __exit exit_sd(void)
3788 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
3790 scsi_unregister_driver(&sd_template.gendrv);
3791 mempool_destroy(sd_cdb_pool);
3792 mempool_destroy(sd_page_pool);
3793 kmem_cache_destroy(sd_cdb_cache);
3795 class_unregister(&sd_disk_class);
3797 for (i = 0; i < SD_MAJORS; i++) {
3798 blk_unregister_region(sd_major(i), SD_MINORS);
3799 unregister_blkdev(sd_major(i), "sd");
3803 module_init(init_sd);
3804 module_exit(exit_sd);
3806 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
3808 scsi_print_sense_hdr(sdkp->device,
3809 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
3812 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
3814 const char *hb_string = scsi_hostbyte_string(result);
3815 const char *db_string = scsi_driverbyte_string(result);
3817 if (hb_string || db_string)
3818 sd_printk(KERN_INFO, sdkp,
3819 "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
3820 hb_string ? hb_string : "invalid",
3821 db_string ? db_string : "invalid");
3823 sd_printk(KERN_INFO, sdkp,
3824 "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
3825 msg, host_byte(result), driver_byte(result));