// SPDX-License-Identifier: GPL-2.0-only
/*
 *      sd.c Copyright (C) 1992 Drew Eckhardt
 *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *      Linux scsi disk driver
 *              Initial versions: Drew Eckhardt
 *              Subsequent revisions: Eric Youngdale
 *      Modification history:
 *       - Drew Eckhardt <drew@colorado.edu> original
 *       - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
 *         outstanding request, and other enhancements.
 *         Support loadable low-level scsi drivers.
 *       - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
 *         eight major numbers.
 *       - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *       - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
 *         sd_init and cleanups.
 *       - Alex Davis <letmein@erols.com> Fix problem where partition info
 *         not being read in sd_open. Fix problem where removable media
 *         could be ejected after sd_open.
 *       - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *       - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
 *         <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
 *         Support 32k/1M disks.
 *
 *      Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *       - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *       - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *       - entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *       - entering other commands: SCSI_LOG_HLQUEUE level 3
 *      Note: when the logging level is set by the user, it must be greater
 *      than the level indicated above to trigger output.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include "sd.h"
#include "scsi_priv.h"
#include "scsi_logging.h"
MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
#define SD_MINORS       16

static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int  sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int  sd_probe(struct device *);
static int  sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume_system(struct device *);
static int sd_resume_runtime(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
static void sd_eh_reset(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);

static DEFINE_IDA(sd_index_ida);

static mempool_t *sd_page_pool;
static struct lock_class_key sd_bio_compl_lkclass;

static const char *sd_cache_types[] = {
        "write through", "none", "write back",
        "write back, no read (daft)"
};
static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
        bool wc = false, fua = false;

        if (sdkp->WCE) {
                wc = true;
                if (sdkp->DPOFUA)
                        fua = true;
        }

        blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}
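/*
 * For illustration: the device-reported WCE/DPOFUA bits map to block
 * layer flush flags as follows. WCE=0 gives wc=false (write-through, no
 * flushes needed); WCE=1 gives wc=true (REQ_PREFLUSH is issued as
 * SYNCHRONIZE CACHE); WCE=1 plus DPOFUA=1 additionally allows REQ_FUA
 * to pass through as the FUA bit on individual writes.
 */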
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        int ct, rcd, wce, sp;
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct scsi_device *sdp = sdkp->device;
        char buffer[64];
        char *buffer_data;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        static const char temp[] = "temporary ";
        int len;

        if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
                /* no cache control on RBC devices; theoretically they
                 * can do it, but there's probably so many exceptions
                 * it's not worth the risk */
                return -EINVAL;

        if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
                buf += sizeof(temp) - 1;
                sdkp->cache_override = 1;
        } else {
                sdkp->cache_override = 0;
        }

        ct = sysfs_match_string(sd_cache_types, buf);
        if (ct < 0)
                return -EINVAL;

        rcd = ct & 0x01 ? 1 : 0;
        wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;

        if (sdkp->cache_override) {
                sdkp->WCE = wce;
                sdkp->RCD = rcd;
                sd_set_flush_flag(sdkp);
                return count;
        }

        if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
                            sdkp->max_retries, &data, NULL))
                return -EINVAL;
        len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
                  data.block_descriptor_length);
        buffer_data = buffer + data.header_length +
                data.block_descriptor_length;
        buffer_data[2] &= ~0x05;
        buffer_data[2] |= wce << 2 | rcd;
        sp = buffer_data[0] & 0x80 ? 1 : 0;
        buffer_data[0] &= ~0x80;

        /*
         * Ensure WP, DPOFUA, and RESERVED fields are cleared in
         * received mode parameter buffer before doing MODE SELECT.
         */
        data.device_specific = 0;

        if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
                             sdkp->max_retries, &data, &sshdr)) {
                if (scsi_sense_valid(&sshdr))
                        sd_print_sense_hdr(sdkp, &sshdr);
                return -EINVAL;
        }
        sd_revalidate_disk(sdkp->disk);
        return count;
}
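/*
 * Usage sketch (paths assumed, H:C:T:L is the device address):
 *
 *   echo "write back" > /sys/class/scsi_disk/0:0:0:0/cache_type
 *   echo "temporary write through" > /sys/class/scsi_disk/0:0:0:0/cache_type
 *
 * The plain form round-trips MODE SENSE/MODE SELECT on caching mode page
 * 0x08; the "temporary " prefix only updates the kernel's view
 * (cache_override) and skips the MODE SELECT to the device.
 */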
static ssize_t
manage_start_stop_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct scsi_device *sdp = sdkp->device;

        return sprintf(buf, "%u\n", sdp->manage_start_stop);
}

static ssize_t
manage_start_stop_store(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct scsi_device *sdp = sdkp->device;
        bool v;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;

        if (kstrtobool(buf, &v))
                return -EINVAL;

        sdp->manage_start_stop = v;

        return count;
}
static DEVICE_ATTR_RW(manage_start_stop);

static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%u\n", sdkp->device->allow_restart);
}

static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
{
        bool v;
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct scsi_device *sdp = sdkp->device;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;

        if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
                return -EINVAL;

        if (kstrtobool(buf, &v))
                return -EINVAL;

        sdp->allow_restart = v;

        return count;
}
static DEVICE_ATTR_RW(allow_restart);

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        int ct = sdkp->RCD + 2*sdkp->WCE;

        return sprintf(buf, "%s\n", sd_cache_types[ct]);
}
static DEVICE_ATTR_RW(cache_type);

static ssize_t
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%u\n", sdkp->DPOFUA);
}
static DEVICE_ATTR_RO(FUA);
static ssize_t
protection_type_show(struct device *dev, struct device_attribute *attr,
                     char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%u\n", sdkp->protection_type);
}

static ssize_t
protection_type_store(struct device *dev, struct device_attribute *attr,
                      const char *buf, size_t count)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        unsigned int val;
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;

        err = kstrtouint(buf, 10, &val);

        if (err)
                return err;

        if (val <= T10_PI_TYPE3_PROTECTION)
                sdkp->protection_type = val;

        return count;
}
static DEVICE_ATTR_RW(protection_type);

static ssize_t
protection_mode_show(struct device *dev, struct device_attribute *attr,
                     char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct scsi_device *sdp = sdkp->device;
        unsigned int dif, dix;

        dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
        dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

        if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
                dif = 0;
                dix = 1;
        }

        if (!dif && !dix)
                return sprintf(buf, "none\n");

        return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);

static ssize_t
app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%u\n", sdkp->ATO);
}
static DEVICE_ATTR_RO(app_tag_own);

static ssize_t
thin_provisioning_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%u\n", sdkp->lbpme);
}
static DEVICE_ATTR_RO(thin_provisioning);
/* sysfs_match_string() requires dense arrays */
static const char *lbp_mode[] = {
        [SD_LBP_FULL]           = "full",
        [SD_LBP_UNMAP]          = "unmap",
        [SD_LBP_WS16]           = "writesame_16",
        [SD_LBP_WS10]           = "writesame_10",
        [SD_LBP_ZERO]           = "writesame_zero",
        [SD_LBP_DISABLE]        = "disabled",
};

static ssize_t
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}

static ssize_t
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct scsi_device *sdp = sdkp->device;
        int mode;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;

        if (sd_is_zoned(sdkp)) {
                sd_config_discard(sdkp, SD_LBP_DISABLE);
                return count;
        }

        if (sdp->type != TYPE_DISK)
                return -EINVAL;

        mode = sysfs_match_string(lbp_mode, buf);
        if (mode < 0)
                return -EINVAL;

        sd_config_discard(sdkp, mode);

        return count;
}
static DEVICE_ATTR_RW(provisioning_mode);

/* sysfs_match_string() requires dense arrays */
static const char *zeroing_mode[] = {
        [SD_ZERO_WRITE]         = "write",
        [SD_ZERO_WS]            = "writesame",
        [SD_ZERO_WS16_UNMAP]    = "writesame_16_unmap",
        [SD_ZERO_WS10_UNMAP]    = "writesame_10_unmap",
};

static ssize_t
zeroing_mode_show(struct device *dev, struct device_attribute *attr,
                  char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
}

static ssize_t
zeroing_mode_store(struct device *dev, struct device_attribute *attr,
                   const char *buf, size_t count)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        int mode;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;

        mode = sysfs_match_string(zeroing_mode, buf);
        if (mode < 0)
                return -EINVAL;

        sdkp->zeroing_mode = mode;

        return count;
}
static DEVICE_ATTR_RW(zeroing_mode);
static ssize_t
max_medium_access_timeouts_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
}

static ssize_t
max_medium_access_timeouts_store(struct device *dev,
                                 struct device_attribute *attr, const char *buf,
                                 size_t count)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;

        err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

        return err ? err : count;
}
static DEVICE_ATTR_RW(max_medium_access_timeouts);

static ssize_t
max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}

static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct scsi_device *sdp = sdkp->device;
        unsigned long max;
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;

        if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
                return -EINVAL;

        err = kstrtoul(buf, 10, &max);

        if (err)
                return err;

        if (max == 0)
                sdp->no_write_same = 1;
        else if (max <= SD_MAX_WS16_BLOCKS) {
                sdp->no_write_same = 0;
                sdkp->max_ws_blocks = max;
        }

        sd_config_write_same(sdkp);

        return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);
static ssize_t
zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        if (sdkp->device->type == TYPE_ZBC)
                return sprintf(buf, "host-managed\n");
        if (sdkp->zoned == 1)
                return sprintf(buf, "host-aware\n");
        if (sdkp->zoned == 2)
                return sprintf(buf, "drive-managed\n");
        return sprintf(buf, "none\n");
}
static DEVICE_ATTR_RO(zoned_cap);

static ssize_t
max_retries_store(struct device *dev, struct device_attribute *attr,
                  const char *buf, size_t count)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct scsi_device *sdev = sdkp->device;
        int retries, err;

        err = kstrtoint(buf, 10, &retries);
        if (err)
                return err;

        if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
                sdkp->max_retries = retries;
                return count;
        }

        sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
                    SD_MAX_RETRIES);
        return -EINVAL;
}

static ssize_t
max_retries_show(struct device *dev, struct device_attribute *attr,
                 char *buf)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);

        return sprintf(buf, "%d\n", sdkp->max_retries);
}

static DEVICE_ATTR_RW(max_retries);
static struct attribute *sd_disk_attrs[] = {
        &dev_attr_cache_type.attr,
        &dev_attr_FUA.attr,
        &dev_attr_allow_restart.attr,
        &dev_attr_manage_start_stop.attr,
        &dev_attr_protection_type.attr,
        &dev_attr_protection_mode.attr,
        &dev_attr_app_tag_own.attr,
        &dev_attr_thin_provisioning.attr,
        &dev_attr_provisioning_mode.attr,
        &dev_attr_zeroing_mode.attr,
        &dev_attr_max_write_same_blocks.attr,
        &dev_attr_max_medium_access_timeouts.attr,
        &dev_attr_zoned_cap.attr,
        &dev_attr_max_retries.attr,
        NULL,
};
ATTRIBUTE_GROUPS(sd_disk);

static struct class sd_disk_class = {
        .name           = "scsi_disk",
        .owner          = THIS_MODULE,
        .dev_release    = scsi_disk_release,
        .dev_groups     = sd_disk_groups,
};

static const struct dev_pm_ops sd_pm_ops = {
        .suspend                = sd_suspend_system,
        .resume                 = sd_resume_system,
        .poweroff               = sd_suspend_system,
        .restore                = sd_resume_system,
        .runtime_suspend        = sd_suspend_runtime,
        .runtime_resume         = sd_resume_runtime,
};

static struct scsi_driver sd_template = {
        .gendrv = {
                .name           = "sd",
                .owner          = THIS_MODULE,
                .probe          = sd_probe,
                .probe_type     = PROBE_PREFER_ASYNCHRONOUS,
                .remove         = sd_remove,
                .shutdown       = sd_shutdown,
                .pm             = &sd_pm_ops,
        },
        .rescan                 = sd_rescan,
        .init_command           = sd_init_command,
        .uninit_command         = sd_uninit_command,
        .done                   = sd_done,
        .eh_action              = sd_eh_action,
        .eh_reset               = sd_eh_reset,
};
/*
 * Don't request a new module, as that could deadlock in multipath
 * environment.
 */
static void sd_default_probe(dev_t devt)
{
}

/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *    31        20 19          8 7  4 3  0
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 * for major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 136--143.
 */
static int sd_major(int major_idx)
{
        switch (major_idx) {
        case 0:
                return SCSI_DISK0_MAJOR;
        case 1 ... 7:
                return SCSI_DISK1_MAJOR + major_idx - 1;
        case 8 ... 15:
                return SCSI_DISK8_MAJOR + major_idx - 8;
        default:
                BUG();
                return 0;       /* shut up gcc */
        }
}
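/*
 * For illustration: sd_major(0) yields SCSI_DISK0_MAJOR (8), sd_major(1)
 * yields SCSI_DISK1_MAJOR (65), and sd_major(8) yields SCSI_DISK8_MAJOR
 * (136), matching the major reuse described in the mapping comment above.
 */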
#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
                size_t len, bool send)
{
        struct scsi_disk *sdkp = data;
        struct scsi_device *sdev = sdkp->device;
        u8 cdb[12] = { 0, };
        const struct scsi_exec_args exec_args = {
                .req_flags = BLK_MQ_REQ_PM,
        };
        int ret;

        cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
        cdb[1] = secp;
        put_unaligned_be16(spsp, &cdb[2]);
        put_unaligned_be32(len, &cdb[6]);

        ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                               buffer, len, SD_TIMEOUT, sdkp->max_retries,
                               &exec_args);
        return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */
/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
        /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
        static const unsigned int ops[] = {     /* wrt dix dif */
                SCSI_PROT_NORMAL,               /*  0   0   0  */
                SCSI_PROT_READ_STRIP,           /*  0   0   1  */
                SCSI_PROT_READ_INSERT,          /*  0   1   0  */
                SCSI_PROT_READ_PASS,            /*  0   1   1  */
                SCSI_PROT_NORMAL,               /*  1   0   0  */
                SCSI_PROT_WRITE_INSERT,         /*  1   0   1  */
                SCSI_PROT_WRITE_STRIP,          /*  1   1   0  */
                SCSI_PROT_WRITE_PASS,           /*  1   1   1  */
        };

        return ops[write << 2 | dix << 1 | dif];
}
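/*
 * Example: a write (1) with host-generated PI (dix=1) to a disk that is
 * not formatted with PI (dif=0) indexes ops[0b110], i.e.
 * SCSI_PROT_WRITE_STRIP: the PI is verified and stripped before the data
 * reaches the unprotected medium.
 */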
/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
 */
static unsigned int sd_prot_flag_mask(unsigned int prot_op)
{
        static const unsigned int flag_mask[] = {
                [SCSI_PROT_NORMAL]              = 0,

                [SCSI_PROT_READ_STRIP]          = SCSI_PROT_TRANSFER_PI |
                                                  SCSI_PROT_GUARD_CHECK |
                                                  SCSI_PROT_REF_CHECK |
                                                  SCSI_PROT_REF_INCREMENT,

                [SCSI_PROT_READ_INSERT]         = SCSI_PROT_REF_INCREMENT |
                                                  SCSI_PROT_IP_CHECKSUM,

                [SCSI_PROT_READ_PASS]           = SCSI_PROT_TRANSFER_PI |
                                                  SCSI_PROT_GUARD_CHECK |
                                                  SCSI_PROT_REF_CHECK |
                                                  SCSI_PROT_REF_INCREMENT |
                                                  SCSI_PROT_IP_CHECKSUM,

                [SCSI_PROT_WRITE_INSERT]        = SCSI_PROT_TRANSFER_PI |
                                                  SCSI_PROT_REF_INCREMENT,

                [SCSI_PROT_WRITE_STRIP]         = SCSI_PROT_GUARD_CHECK |
                                                  SCSI_PROT_REF_CHECK |
                                                  SCSI_PROT_REF_INCREMENT |
                                                  SCSI_PROT_IP_CHECKSUM,

                [SCSI_PROT_WRITE_PASS]          = SCSI_PROT_TRANSFER_PI |
                                                  SCSI_PROT_GUARD_CHECK |
                                                  SCSI_PROT_REF_CHECK |
                                                  SCSI_PROT_REF_INCREMENT |
                                                  SCSI_PROT_IP_CHECKSUM,
        };

        return flag_mask[prot_op];
}
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
                                           unsigned int dix, unsigned int dif)
{
        struct request *rq = scsi_cmd_to_rq(scmd);
        struct bio *bio = rq->bio;
        unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
        unsigned int protect = 0;

        if (dix) {                              /* DIX Type 0, 1, 2, 3 */
                if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
                        scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

                if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
                        scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
        }

        if (dif != T10_PI_TYPE3_PROTECTION) {   /* DIX/DIF Type 0, 1, 2 */
                scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

                if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
                        scmd->prot_flags |= SCSI_PROT_REF_CHECK;
        }

        if (dif) { /* DIX/DIF Type 1, 2, 3 */
                scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

                if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
                        protect = 3 << 5;       /* Disable target PI checking */
                else
                        protect = 1 << 5;       /* Enable target PI checking */
        }

        scsi_set_prot_op(scmd, prot_op);
        scsi_set_prot_type(scmd, dif);
        scmd->prot_flags &= sd_prot_flag_mask(prot_op);

        return protect;
}
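/*
 * The returned value lands in the RDPROTECT/WRPROTECT field (bits 5-7 of
 * the byte following the opcode) when the read/write CDB is built below:
 * 0 leaves target checking off, 1 << 5 requests full guard/reference tag
 * checking, and 3 << 5 transfers PI but tells the target not to check it.
 */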
static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
{
        struct request_queue *q = sdkp->disk->queue;
        unsigned int logical_block_size = sdkp->device->sector_size;
        unsigned int max_blocks = 0;

        q->limits.discard_alignment =
                sdkp->unmap_alignment * logical_block_size;
        q->limits.discard_granularity =
                max(sdkp->physical_block_size,
                    sdkp->unmap_granularity * logical_block_size);
        sdkp->provisioning_mode = mode;

        switch (mode) {

        case SD_LBP_FULL:
        case SD_LBP_DISABLE:
                blk_queue_max_discard_sectors(q, 0);
                return;

        case SD_LBP_UNMAP:
                max_blocks = min_not_zero(sdkp->max_unmap_blocks,
                                          (u32)SD_MAX_WS16_BLOCKS);
                break;

        case SD_LBP_WS16:
                if (sdkp->device->unmap_limit_for_ws)
                        max_blocks = sdkp->max_unmap_blocks;
                else
                        max_blocks = sdkp->max_ws_blocks;

                max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
                break;

        case SD_LBP_WS10:
                if (sdkp->device->unmap_limit_for_ws)
                        max_blocks = sdkp->max_unmap_blocks;
                else
                        max_blocks = sdkp->max_ws_blocks;

                max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
                break;

        case SD_LBP_ZERO:
                max_blocks = min_not_zero(sdkp->max_ws_blocks,
                                          (u32)SD_MAX_WS10_BLOCKS);
                break;
        }

        blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
}
static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
{
        struct page *page;

        page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
        if (!page)
                return NULL;
        clear_highpage(page);
        bvec_set_page(&rq->special_vec, page, data_len, 0);
        rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
        return bvec_virt(&rq->special_vec);
}

static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdp = cmd->device;
        struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
        u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
        unsigned int data_len = 24;
        char *buf;

        buf = sd_set_special_bvec(rq, data_len);
        if (!buf)
                return BLK_STS_RESOURCE;

        cmd->cmd_len = 10;
        cmd->cmnd[0] = UNMAP;
        cmd->cmnd[8] = 24;

        put_unaligned_be16(6 + 16, &buf[0]);
        put_unaligned_be16(16, &buf[2]);
        put_unaligned_be64(lba, &buf[8]);
        put_unaligned_be32(nr_blocks, &buf[16]);

        cmd->allowed = sdkp->max_retries;
        cmd->transfersize = data_len;
        rq->timeout = SD_TIMEOUT;

        return scsi_alloc_sgtables(cmd);
}
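/*
 * Layout of the 24-byte UNMAP parameter list built above: bytes 0-1 hold
 * the data length (6 + 16), bytes 2-3 the block descriptor data length
 * (16), and the single descriptor starting at byte 8 carries a 64-bit
 * starting LBA followed by a 32-bit block count.
 */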
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
                bool unmap)
{
        struct scsi_device *sdp = cmd->device;
        struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
        u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
        u32 data_len = sdp->sector_size;

        if (!sd_set_special_bvec(rq, data_len))
                return BLK_STS_RESOURCE;

        cmd->cmd_len = 16;
        cmd->cmnd[0] = WRITE_SAME_16;
        if (unmap)
                cmd->cmnd[1] = 0x8; /* UNMAP */
        put_unaligned_be64(lba, &cmd->cmnd[2]);
        put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

        cmd->allowed = sdkp->max_retries;
        cmd->transfersize = data_len;
        rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

        return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
                bool unmap)
{
        struct scsi_device *sdp = cmd->device;
        struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
        u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
        u32 data_len = sdp->sector_size;

        if (!sd_set_special_bvec(rq, data_len))
                return BLK_STS_RESOURCE;

        cmd->cmd_len = 10;
        cmd->cmnd[0] = WRITE_SAME;
        if (unmap)
                cmd->cmnd[1] = 0x8; /* UNMAP */
        put_unaligned_be32(lba, &cmd->cmnd[2]);
        put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

        cmd->allowed = sdkp->max_retries;
        cmd->transfersize = data_len;
        rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

        return scsi_alloc_sgtables(cmd);
}
static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
        struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_device *sdp = cmd->device;
        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
        u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

        if (!(rq->cmd_flags & REQ_NOUNMAP)) {
                switch (sdkp->zeroing_mode) {
                case SD_ZERO_WS16_UNMAP:
                        return sd_setup_write_same16_cmnd(cmd, true);
                case SD_ZERO_WS10_UNMAP:
                        return sd_setup_write_same10_cmnd(cmd, true);
                }
        }

        if (sdp->no_write_same) {
                rq->rq_flags |= RQF_QUIET;
                return BLK_STS_TARGET;
        }

        if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
                return sd_setup_write_same16_cmnd(cmd, false);

        return sd_setup_write_same10_cmnd(cmd, false);
}
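/*
 * In short, the ladder above prefers WRITE SAME with the UNMAP bit when
 * the device guarantees zeroed unmapped blocks (lbprz), then falls back
 * to a plain WRITE SAME, picking the 16-byte CDB whenever ws16 is set or
 * the LBA/count do not fit the 10-byte variant.
 */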
static void sd_config_write_same(struct scsi_disk *sdkp)
{
        struct request_queue *q = sdkp->disk->queue;
        unsigned int logical_block_size = sdkp->device->sector_size;

        if (sdkp->device->no_write_same) {
                sdkp->max_ws_blocks = 0;
                goto out;
        }

        /* Some devices can not handle block counts above 0xffff despite
         * supporting WRITE SAME(16). Consequently we default to 64k
         * blocks per I/O unless the device explicitly advertises a
         * bigger limit.
         */
        if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
                sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
                                                   (u32)SD_MAX_WS16_BLOCKS);
        else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
                sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
                                                   (u32)SD_MAX_WS10_BLOCKS);
        else {
                sdkp->device->no_write_same = 1;
                sdkp->max_ws_blocks = 0;
        }

        if (sdkp->lbprz && sdkp->lbpws)
                sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
        else if (sdkp->lbprz && sdkp->lbpws10)
                sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
        else if (sdkp->max_ws_blocks)
                sdkp->zeroing_mode = SD_ZERO_WS;
        else
                sdkp->zeroing_mode = SD_ZERO_WRITE;

        if (sdkp->max_ws_blocks &&
            sdkp->physical_block_size > logical_block_size) {
                /*
                 * Reporting a maximum number of blocks that is not aligned
                 * on the device physical size would cause a large write same
                 * request to be split into physically unaligned chunks by
                 * __blkdev_issue_write_zeroes() even if the caller of this
                 * function took care to align the large request. So make sure
                 * the maximum reported is aligned to the device physical block
                 * size. This is only an optional optimization for regular
                 * disks, but this is mandatory to avoid failure of large write
                 * same requests directed at sequential write required zones of
                 * host-managed ZBC disks.
                 */
                sdkp->max_ws_blocks =
                        round_down(sdkp->max_ws_blocks,
                                   bytes_to_logical(sdkp->device,
                                                    sdkp->physical_block_size));
        }

out:
        blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
                                           (logical_block_size >> 9));
}
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
        struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);

        /* flush requests don't perform I/O, zero the S/G table */
        memset(&cmd->sdb, 0, sizeof(cmd->sdb));

        if (cmd->device->use_16_for_sync) {
                cmd->cmnd[0] = SYNCHRONIZE_CACHE_16;
                cmd->cmd_len = 16;
        } else {
                cmd->cmnd[0] = SYNCHRONIZE_CACHE;
                cmd->cmd_len = 10;
        }
        cmd->transfersize = 0;
        cmd->allowed = sdkp->max_retries;

        rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
        return BLK_STS_OK;
}
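/*
 * The timeout above is scaled by SD_FLUSH_TIMEOUT_MULTIPLIER because a
 * SYNCHRONIZE CACHE may need to destage the device's entire write cache,
 * which can take much longer than a single medium access.
 */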
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
                                       sector_t lba, unsigned int nr_blocks,
                                       unsigned char flags)
{
        cmd->cmd_len  = SD_EXT_CDB_SIZE;
        cmd->cmnd[0]  = VARIABLE_LENGTH_CMD;
        cmd->cmnd[7]  = 0x18; /* Additional CDB len */
        cmd->cmnd[9]  = write ? WRITE_32 : READ_32;
        cmd->cmnd[10] = flags;
        put_unaligned_be64(lba, &cmd->cmnd[12]);
        put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
        put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

        return BLK_STS_OK;
}

static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
                                       sector_t lba, unsigned int nr_blocks,
                                       unsigned char flags)
{
        cmd->cmd_len  = 16;
        cmd->cmnd[0]  = write ? WRITE_16 : READ_16;
        cmd->cmnd[1]  = flags;
        put_unaligned_be64(lba, &cmd->cmnd[2]);
        put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

        return BLK_STS_OK;
}

static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
                                       sector_t lba, unsigned int nr_blocks,
                                       unsigned char flags)
{
        cmd->cmd_len = 10;
        cmd->cmnd[0] = write ? WRITE_10 : READ_10;
        cmd->cmnd[1] = flags;
        put_unaligned_be32(lba, &cmd->cmnd[2]);
        put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

        return BLK_STS_OK;
}

static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
                                      sector_t lba, unsigned int nr_blocks,
                                      unsigned char flags)
{
        /* Avoid that 0 blocks gets translated into 256 blocks. */
        if (WARN_ON_ONCE(nr_blocks == 0))
                return BLK_STS_IOERR;

        if (unlikely(flags & 0x8)) {
                /*
                 * This happens only if this drive failed 10byte rw
                 * command with ILLEGAL_REQUEST during operation and
                 * thus turned off use_10_for_rw.
                 */
                scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
                return BLK_STS_IOERR;
        }

        cmd->cmd_len = 6;
        cmd->cmnd[0] = write ? WRITE_6 : READ_6;
        cmd->cmnd[1] = (lba >> 16) & 0x1f;
        cmd->cmnd[2] = (lba >> 8) & 0xff;
        cmd->cmnd[3] = lba & 0xff;
        cmd->cmnd[4] = nr_blocks;
        cmd->cmnd[5] = 0;

        return BLK_STS_OK;
}
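/*
 * CDB limits enforced by the helpers above: READ/WRITE(6) carries a
 * 21-bit LBA and an 8-bit count where 0 means 256 blocks (hence the
 * guard); (10) extends this to a 32-bit LBA and 16-bit count; (16) and
 * (32) carry full 64-bit LBAs, with (32) used for Type 2 protection.
 */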
static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
        struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_device *sdp = cmd->device;
        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
        sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        sector_t threshold;
        unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
        unsigned int mask = logical_to_sectors(sdp, 1) - 1;
        bool write = rq_data_dir(rq) == WRITE;
        unsigned char protect, fua;
        blk_status_t ret;
        unsigned int dif;
        bool dix;

        ret = scsi_alloc_sgtables(cmd);
        if (ret != BLK_STS_OK)
                return ret;

        ret = BLK_STS_IOERR;
        if (!scsi_device_online(sdp) || sdp->changed) {
                scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
                goto fail;
        }

        if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
                scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
                goto fail;
        }

        if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
                scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
                goto fail;
        }

        /*
         * Some SD card readers can't handle accesses which touch the
         * last one or two logical blocks. Split accesses as needed.
         */
        threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;

        if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
                if (lba < threshold) {
                        /* Access up to the threshold but not beyond */
                        nr_blocks = threshold - lba;
                } else {
                        /* Access only a single logical block */
                        nr_blocks = 1;
                }
        }

        if (req_op(rq) == REQ_OP_ZONE_APPEND) {
                ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
                if (ret)
                        goto fail;
        }

        fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
        dix = scsi_prot_sg_count(cmd);
        dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);

        if (dif || dix)
                protect = sd_setup_protect_cmnd(cmd, dix, dif);
        else
                protect = 0;

        if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
                ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
                                         protect | fua);
        } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
                ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
                                         protect | fua);
        } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
                   sdp->use_10_for_rw || protect) {
                ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
                                         protect | fua);
        } else {
                ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
                                        protect | fua);
        }

        if (unlikely(ret != BLK_STS_OK))
                goto fail;

        /*
         * We shouldn't disconnect in the middle of a sector, so with a dumb
         * host adapter, it's safe to assume that we can at least transfer
         * this many bytes between each connect / disconnect.
         */
        cmd->transfersize = sdp->sector_size;
        cmd->underflow = nr_blocks << 9;
        cmd->allowed = sdkp->max_retries;
        cmd->sdb.length = nr_blocks * sdp->sector_size;

        SCSI_LOG_HLQUEUE(1,
                         scmd_printk(KERN_INFO, cmd,
                                     "%s: block=%llu, count=%d\n", __func__,
                                     (unsigned long long)blk_rq_pos(rq),
                                     blk_rq_sectors(rq)));
        SCSI_LOG_HLQUEUE(2,
                         scmd_printk(KERN_INFO, cmd,
                                     "%s %d/%u 512 byte blocks.\n",
                                     write ? "writing" : "reading", nr_blocks,
                                     blk_rq_sectors(rq)));

        /*
         * This indicates that the command is ready from our end to be queued.
         */
        return BLK_STS_OK;
fail:
        scsi_free_sgtables(cmd);
        return ret;
}
static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
        struct request *rq = scsi_cmd_to_rq(cmd);

        switch (req_op(rq)) {
        case REQ_OP_DISCARD:
                switch (scsi_disk(rq->q->disk)->provisioning_mode) {
                case SD_LBP_UNMAP:
                        return sd_setup_unmap_cmnd(cmd);
                case SD_LBP_WS16:
                        return sd_setup_write_same16_cmnd(cmd, true);
                case SD_LBP_WS10:
                        return sd_setup_write_same10_cmnd(cmd, true);
                case SD_LBP_ZERO:
                        return sd_setup_write_same10_cmnd(cmd, false);
                default:
                        return BLK_STS_TARGET;
                }
        case REQ_OP_WRITE_ZEROES:
                return sd_setup_write_zeroes_cmnd(cmd);
        case REQ_OP_FLUSH:
                return sd_setup_flush_cmnd(cmd);
        case REQ_OP_READ:
        case REQ_OP_WRITE:
        case REQ_OP_ZONE_APPEND:
                return sd_setup_read_write_cmnd(cmd);
        case REQ_OP_ZONE_RESET:
                return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
                                                   false);
        case REQ_OP_ZONE_RESET_ALL:
                return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
                                                   true);
        case REQ_OP_ZONE_OPEN:
                return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
        case REQ_OP_ZONE_CLOSE:
                return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
        case REQ_OP_ZONE_FINISH:
                return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_NOTSUPP;
        }
}

static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
        struct request *rq = scsi_cmd_to_rq(SCpnt);

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                mempool_free(rq->special_vec.bv_page, sd_page_pool);
}
static bool sd_need_revalidate(struct block_device *bdev,
                struct scsi_disk *sdkp)
{
        if (sdkp->device->removable || sdkp->write_prot) {
                if (bdev_check_media_change(bdev))
                        return true;
        }

        /*
         * Force a full rescan after ioctl(BLKRRPART).  While the disk state has
         * nothing to do with partitions, BLKRRPART is used to force a full
         * revalidate after things like a format for historical reasons.
         */
        return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}
/**
 *      sd_open - open a scsi disk device
 *      @bdev: Block device of the scsi disk to open
 *      @mode: FMODE_* mask
 *
 *      Returns 0 if successful. Returns a negated errno value in case
 *      of error.
 *
 *      Note: This can be called from a user context (e.g. fsck(1) )
 *      or from within the kernel (e.g. as a result of a mount(1) ).
 *      In the latter case @inode and @filp carry an abridged amount
 *      of information as noted above.
 *
 *      Locking: called with bdev->bd_disk->open_mutex held.
 **/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
        struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
        struct scsi_device *sdev = sdkp->device;
        int retval;

        if (scsi_device_get(sdev))
                return -ENXIO;

        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));

        /*
         * If the device is in error recovery, wait until it is done.
         * If the device is offline, then disallow any access to it.
         */
        retval = -ENXIO;
        if (!scsi_block_when_processing_errors(sdev))
                goto error_out;

        if (sd_need_revalidate(bdev, sdkp))
                sd_revalidate_disk(bdev->bd_disk);

        /*
         * If the drive is empty, just let the open fail.
         */
        retval = -ENOMEDIUM;
        if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
                goto error_out;

        /*
         * If the device has the write protect tab set, have the open fail
         * if the user expects to be able to write to the thing.
         */
        retval = -EROFS;
        if (sdkp->write_prot && (mode & FMODE_WRITE))
                goto error_out;

        /*
         * It is possible that the disk changing stuff resulted in
         * the device being taken offline.  If this is the case,
         * report this to the user, and don't pretend that the
         * open actually succeeded.
         */
        retval = -ENXIO;
        if (!scsi_device_online(sdev))
                goto error_out;

        if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
                if (scsi_block_when_processing_errors(sdev))
                        scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
        }

        return 0;

error_out:
        scsi_device_put(sdev);
        return retval;
}
/**
 *      sd_release - invoked when the (last) close(2) is called on this
 *      scsi disk.
 *      @disk: disk to release
 *      @mode: FMODE_* mask
 *
 *      Note: may block (uninterruptible) if error recovery is underway
 *      on this disk.
 *
 *      Locking: called with bdev->bd_disk->open_mutex held.
 **/
static void sd_release(struct gendisk *disk, fmode_t mode)
{
        struct scsi_disk *sdkp = scsi_disk(disk);
        struct scsi_device *sdev = sdkp->device;

        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));

        if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
                if (scsi_block_when_processing_errors(sdev))
                        scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
        }

        scsi_device_put(sdev);
}
static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
        struct scsi_device *sdp = sdkp->device;
        struct Scsi_Host *host = sdp->host;
        sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
        int diskinfo[4];

        /* default to most commonly used values */
        diskinfo[0] = 0x40;     /* 1 << 6 */
        diskinfo[1] = 0x20;     /* 1 << 5 */
        diskinfo[2] = capacity >> 11;

        /* override with calculated, extended default, or driver values */
        if (host->hostt->bios_param)
                host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
        else
                scsicam_bios_param(bdev, capacity, diskinfo);

        geo->heads = diskinfo[0];
        geo->sectors = diskinfo[1];
        geo->cylinders = diskinfo[2];
        return 0;
}
/**
 *      sd_ioctl - process an ioctl
 *      @bdev: target block device
 *      @mode: FMODE_* mask
 *      @cmd: ioctl command number
 *      @arg: this is third argument given to ioctl(2) system call.
 *      Often contains a pointer.
 *
 *      Returns 0 if successful (some ioctls return positive numbers on
 *      success as well). Returns a negated errno value in case of error.
 *
 *      Note: most ioctls are forwarded to the block subsystem or further
 *      down in the scsi subsystem.
 **/
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
                    unsigned int cmd, unsigned long arg)
{
        struct gendisk *disk = bdev->bd_disk;
        struct scsi_disk *sdkp = scsi_disk(disk);
        struct scsi_device *sdp = sdkp->device;
        void __user *p = (void __user *)arg;
        int error;

        SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
                                    "cmd=0x%x\n", disk->disk_name, cmd));

        if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
                return -ENOIOCTLCMD;

        /*
         * If we are in the middle of error recovery, don't let anyone
         * else try and use this device.  Also, if error recovery fails, it
         * may try and take the device offline, in which case all further
         * access to the device is prohibited.
         */
        error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
                        (mode & FMODE_NDELAY) != 0);
        if (error)
                return error;

        if (is_sed_ioctl(cmd))
                return sed_ioctl(sdkp->opal_dev, cmd, p);
        return scsi_ioctl(sdp, mode, cmd, p);
}
static void set_media_not_present(struct scsi_disk *sdkp)
{
        if (sdkp->media_present)
                sdkp->device->changed = 1;

        if (sdkp->device->removable) {
                sdkp->media_present = 0;
                sdkp->capacity = 0;
        }
}

static int media_not_present(struct scsi_disk *sdkp,
                             struct scsi_sense_hdr *sshdr)
{
        if (!scsi_sense_valid(sshdr))
                return 0;

        /* not invoked for commands that could return deferred errors */
        switch (sshdr->sense_key) {
        case UNIT_ATTENTION:
        case NOT_READY:
                /* medium not present */
                if (sshdr->asc == 0x3A) {
                        set_media_not_present(sdkp);
                        return 1;
                }
        }
        return 0;
}
/**
 *      sd_check_events - check media events
 *      @disk: kernel device descriptor
 *      @clearing: disk events currently being cleared
 *
 *      Returns mask of DISK_EVENT_*.
 *
 *      Note: this function is invoked from the block subsystem.
 **/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
        struct scsi_disk *sdkp = disk->private_data;
        struct scsi_device *sdp;
        int retval;
        bool disk_changed;

        if (!sdkp)
                return 0;

        sdp = sdkp->device;
        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));

        /*
         * If the device is offline, don't send any commands - just pretend as
         * if the command failed.  If the device ever comes back online, we
         * can deal with it then.  It is only because of unrecoverable errors
         * that we would ever take a device offline in the first place.
         */
        if (!scsi_device_online(sdp)) {
                set_media_not_present(sdkp);
                goto out;
        }

        /*
         * Using TEST_UNIT_READY enables differentiation between drive with
         * no cartridge loaded - NOT READY, drive with changed cartridge -
         * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
         *
         * Drives that auto spin down, e.g. iomega jaz 1G, will be started
         * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
         * sd_revalidate() is called.
         */
        if (scsi_block_when_processing_errors(sdp)) {
                struct scsi_sense_hdr sshdr = { 0, };

                retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
                                              &sshdr);

                /* failed to execute TUR, assume media not present */
                if (retval < 0 || host_byte(retval)) {
                        set_media_not_present(sdkp);
                        goto out;
                }

                if (media_not_present(sdkp, &sshdr))
                        goto out;
        }

        /*
         * For removable scsi disk we have to recognise the presence
         * of a disk in the drive.
         */
        if (!sdkp->media_present)
                sdp->changed = 1;
        sdkp->media_present = 1;
out:
        /*
         * sdp->changed is set under the following conditions:
         *
         *      Medium present state has changed in either direction.
         *      Device has indicated UNIT_ATTENTION.
         */
        disk_changed = sdp->changed;
        sdp->changed = 0;
        return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
{
        int retries, res;
        struct scsi_device *sdp = sdkp->device;
        const int timeout = sdp->request_queue->rq_timeout
                * SD_FLUSH_TIMEOUT_MULTIPLIER;
        struct scsi_sense_hdr my_sshdr;
        const struct scsi_exec_args exec_args = {
                .req_flags = BLK_MQ_REQ_PM,
                /* caller might not be interested in sense, but we need it */
                .sshdr = sshdr ? : &my_sshdr,
        };

        if (!scsi_device_online(sdp))
                return -ENODEV;

        sshdr = exec_args.sshdr;

        for (retries = 3; retries > 0; --retries) {
                unsigned char cmd[16] = { 0 };

                if (sdp->use_16_for_sync)
                        cmd[0] = SYNCHRONIZE_CACHE_16;
                else
                        cmd[0] = SYNCHRONIZE_CACHE;
                /*
                 * Leave the rest of the command zero to indicate
                 * flush everything.
                 */
                res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
                                       timeout, sdkp->max_retries, &exec_args);
                if (res == 0)
                        break;
        }

        if (res) {
                sd_print_result(sdkp, "Synchronize Cache(10) failed", res);

                if (res < 0)
                        return res;

                if (scsi_status_is_check_condition(res) &&
                    scsi_sense_valid(sshdr)) {
                        sd_print_sense_hdr(sdkp, sshdr);

                        /* we need to evaluate the error return  */
                        if (sshdr->asc == 0x3a ||       /* medium not present */
                            sshdr->asc == 0x20 ||       /* invalid command */
                            (sshdr->asc == 0x74 && sshdr->ascq == 0x71))        /* drive is password locked */
                                /* this is no error here */
                                return 0;
                }

                switch (host_byte(res)) {
                /* ignore errors due to racing a disconnection */
                case DID_BAD_TARGET:
                case DID_NO_CONNECT:
                        return 0;
                /* signal the upper layer it might try again */
                case DID_BUS_BUSY:
                case DID_IMM_RETRY:
                case DID_REQUEUE:
                case DID_SOFT_ERROR:
                        return -EBUSY;
                default:
                        return -EIO;
                }
        }
        return 0;
}
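/*
 * Return convention of sd_sync_cache(), in brief: 0 when the cache is
 * known clean or the failure is benign (no medium, password-locked
 * drive), -EBUSY when the transport suggests a retry, -EIO otherwise.
 */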
static void sd_rescan(struct device *dev)
{
        struct scsi_disk *sdkp = dev_get_drvdata(dev);

        sd_revalidate_disk(sdkp->disk);
}

static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
                enum blk_unique_id type)
{
        struct scsi_device *sdev = scsi_disk(disk)->device;
        const struct scsi_vpd *vpd;
        const unsigned char *d;
        int ret = -ENXIO, len;

        rcu_read_lock();
        vpd = rcu_dereference(sdev->vpd_pg83);
        if (!vpd)
                goto out_unlock;

        ret = -EINVAL;
        for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
                /* we only care about designators with LU association */
                if (((d[1] >> 4) & 0x3) != 0x00)
                        continue;
                if ((d[1] & 0xf) != type)
                        continue;
                /*
                 * Only exit early if a 16-byte descriptor was found.  Otherwise
                 * keep looking as one with more entropy might still show up.
                 */
                len = d[3];
                if (len != 8 && len != 12 && len != 16)
                        continue;
                ret = len;
                memcpy(id, d + 4, len);
                if (len == 16)
                        break;
        }
out_unlock:
        rcu_read_unlock();
        return ret;
}
static char sd_pr_type(enum pr_type type)
{
        switch (type) {
        case PR_WRITE_EXCLUSIVE:
                return 0x01;
        case PR_EXCLUSIVE_ACCESS:
                return 0x03;
        case PR_WRITE_EXCLUSIVE_REG_ONLY:
                return 0x05;
        case PR_EXCLUSIVE_ACCESS_REG_ONLY:
                return 0x06;
        case PR_WRITE_EXCLUSIVE_ALL_REGS:
                return 0x07;
        case PR_EXCLUSIVE_ACCESS_ALL_REGS:
                return 0x08;
        default:
                return 0;
        }
};
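/*
 * The values above are the PERSISTENT RESERVE OUT TYPE codes from SPC:
 * 0x01 Write Exclusive, 0x03 Exclusive Access, 0x05/0x06 the
 * registrants-only variants, 0x07/0x08 the all-registrants variants;
 * 0 marks an unsupported pr_type.
 */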
static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
{
        switch (host_byte(result)) {
        case DID_TRANSPORT_MARGINAL:
        case DID_TRANSPORT_DISRUPTED:
        case DID_BUS_BUSY:
                return PR_STS_RETRY_PATH_FAILURE;
        case DID_NO_CONNECT:
                return PR_STS_PATH_FAILED;
        case DID_TRANSPORT_FAILFAST:
                return PR_STS_PATH_FAST_FAILED;
        }

        switch (status_byte(result)) {
        case SAM_STAT_RESERVATION_CONFLICT:
                return PR_STS_RESERVATION_CONFLICT;
        case SAM_STAT_CHECK_CONDITION:
                if (!scsi_sense_valid(sshdr))
                        return PR_STS_IOERR;

                if (sshdr->sense_key == ILLEGAL_REQUEST &&
                    (sshdr->asc == 0x26 || sshdr->asc == 0x24))
                        return -EINVAL;

                fallthrough;
        default:
                return PR_STS_IOERR;
        }
}
static int sd_pr_command(struct block_device *bdev, u8 sa,
                u64 key, u64 sa_key, u8 type, u8 flags)
{
        struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
        struct scsi_device *sdev = sdkp->device;
        struct scsi_sense_hdr sshdr;
        const struct scsi_exec_args exec_args = {
                .sshdr = &sshdr,
        };
        int result;
        u8 cmd[16] = { 0, };
        u8 data[24] = { 0, };

        cmd[0] = PERSISTENT_RESERVE_OUT;
        cmd[1] = sa;
        cmd[2] = type;
        put_unaligned_be32(sizeof(data), &cmd[5]);

        put_unaligned_be64(key, &data[0]);
        put_unaligned_be64(sa_key, &data[8]);
        data[20] = flags;

        result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data,
                                  sizeof(data), SD_TIMEOUT, sdkp->max_retries,
                                  &exec_args);

        if (scsi_status_is_check_condition(result) &&
            scsi_sense_valid(&sshdr)) {
                sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
                scsi_print_sense_hdr(sdev, NULL, &sshdr);
        }

        if (result <= 0)
                return result;

        return sd_scsi_to_pr_err(&sshdr, result);
}
static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
                u32 flags)
{
        if (flags & ~PR_FL_IGNORE_KEY)
                return -EOPNOTSUPP;
        return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
                        old_key, new_key, 0,
                        (1 << 0) /* APTPL */);
}

static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
                u32 flags)
{
        if (flags)
                return -EOPNOTSUPP;
        return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
}

static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
        return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
}

static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
                enum pr_type type, bool abort)
{
        return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
                        sd_pr_type(type), 0);
}

static int sd_pr_clear(struct block_device *bdev, u64 key)
{
        return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
}

static const struct pr_ops sd_pr_ops = {
        .pr_register    = sd_pr_register,
        .pr_reserve     = sd_pr_reserve,
        .pr_release     = sd_pr_release,
        .pr_preempt     = sd_pr_preempt,
        .pr_clear       = sd_pr_clear,
};
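/*
 * For reference, the service action codes passed to sd_pr_command()
 * above follow SPC PERSISTENT RESERVE OUT: 0x00 REGISTER, 0x01 RESERVE,
 * 0x02 RELEASE, 0x03 CLEAR, 0x04 PREEMPT, 0x05 PREEMPT AND ABORT, and
 * 0x06 REGISTER AND IGNORE EXISTING KEY.
 */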
static void scsi_disk_free_disk(struct gendisk *disk)
{
        struct scsi_disk *sdkp = scsi_disk(disk);

        put_device(&sdkp->disk_dev);
}

static const struct block_device_operations sd_fops = {
        .owner                  = THIS_MODULE,
        .open                   = sd_open,
        .release                = sd_release,
        .ioctl                  = sd_ioctl,
        .getgeo                 = sd_getgeo,
        .compat_ioctl           = blkdev_compat_ptr_ioctl,
        .check_events           = sd_check_events,
        .unlock_native_capacity = sd_unlock_native_capacity,
        .report_zones           = sd_zbc_report_zones,
        .get_unique_id          = sd_get_unique_id,
        .free_disk              = scsi_disk_free_disk,
        .pr_ops                 = &sd_pr_ops,
};
/**
 *      sd_eh_reset - reset error handling callback
 *      @scmd:          sd-issued command that has failed
 *
 *      This function is called by the SCSI midlayer before starting
 *      SCSI EH. When counting medium access failures we have to be
 *      careful to register it only once per device and SCSI EH run;
 *      there might be several timed out commands which will cause the
 *      'max_medium_access_timeouts' counter to trigger after the first
 *      SCSI EH run already and set the device to offline.
 *      So this function resets the internal counter before starting SCSI EH.
 **/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
        struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);

        /* New SCSI EH run, reset gate variable */
        sdkp->ignore_medium_access_errors = false;
}

/**
 *      sd_eh_action - error handling callback
 *      @scmd:          sd-issued command that has failed
 *      @eh_disp:       The recovery disposition suggested by the midlayer
 *
 *      This function is called by the SCSI midlayer upon completion of an
 *      error test command (currently TEST UNIT READY). The result of sending
 *      the eh command is passed in eh_disp.  We're looking for devices that
 *      fail medium access commands but are OK with non access commands like
 *      test unit ready (so wrongly see the device as having a successful
 *      recovery).
 **/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
        struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
        struct scsi_device *sdev = scmd->device;

        if (!scsi_device_online(sdev) ||
            !scsi_medium_access_command(scmd) ||
            host_byte(scmd->result) != DID_TIME_OUT ||
            eh_disp != SUCCESS)
                return eh_disp;

        /*
         * The device has timed out executing a medium access command.
         * However, the TEST UNIT READY command sent during error
         * handling completed successfully. Either the device is in the
         * process of recovering or it has suffered an internal failure
         * that prevents access to the storage medium.
         */
        if (!sdkp->ignore_medium_access_errors) {
                sdkp->medium_access_timed_out++;
                sdkp->ignore_medium_access_errors = true;
        }

        /*
         * If the device keeps failing read/write commands but TEST UNIT
         * READY always completes successfully we assume that medium
         * access is no longer possible and take the device offline.
         */
        if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
                scmd_printk(KERN_ERR, scmd,
                            "Medium access timeout failure. Offlining disk!\n");
                mutex_lock(&sdev->state_mutex);
                scsi_device_set_state(sdev, SDEV_OFFLINE);
                mutex_unlock(&sdev->state_mutex);

                return SUCCESS;
        }

        return eh_disp;
}
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
        struct request *req = scsi_cmd_to_rq(scmd);
        struct scsi_device *sdev = scmd->device;
        unsigned int transferred, good_bytes;
        u64 start_lba, end_lba, bad_lba;

        /*
         * Some commands have a payload smaller than the device logical
         * block size (e.g. INQUIRY on a 4K disk).
         */
        if (scsi_bufflen(scmd) <= sdev->sector_size)
                return 0;

        /* Check if we have a 'bad_lba' information */
        if (!scsi_get_sense_info_fld(scmd->sense_buffer,
                                     SCSI_SENSE_BUFFERSIZE,
                                     &bad_lba))
                return 0;

        /*
         * If the bad lba was reported incorrectly, we have no idea where
         * the error is.
         */
        start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
        end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
        if (bad_lba < start_lba || bad_lba >= end_lba)
                return 0;

        /*
         * resid is optional but mostly filled in.  When it's unused,
         * its value is zero, so we assume the whole buffer transferred
         */
        transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);

        /* This computation should always be done in terms of the
         * resolution of the device's medium.
         */
        good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);

        return min(good_bytes, transferred);
}
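/*
 * Worked example: a 32-block READ at LBA 1000 on a 512-byte-sector disk
 * that fails with bad_lba = 1010 yields good_bytes = (1010 - 1000) * 512
 * = 5120, capped by the number of bytes actually transferred (resid).
 */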
/**
 *      sd_done - bottom half handler: called when the lower level
 *      driver has completed (successfully or otherwise) a scsi command.
 *      @SCpnt: mid-level's per command structure.
 *
 *      Note: potentially run from within an ISR. Must not block.
 **/
static int sd_done(struct scsi_cmnd *SCpnt)
{
        int result = SCpnt->result;
        unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
        unsigned int sector_size = SCpnt->device->sector_size;
        unsigned int resid;
        struct scsi_sense_hdr sshdr;
        struct request *req = scsi_cmd_to_rq(SCpnt);
        struct scsi_disk *sdkp = scsi_disk(req->q->disk);
        int sense_valid = 0;
        int sense_deferred = 0;

        switch (req_op(req)) {
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                if (!result) {
                        good_bytes = blk_rq_bytes(req);
                        scsi_set_resid(SCpnt, 0);
                } else {
                        good_bytes = 0;
                        scsi_set_resid(SCpnt, blk_rq_bytes(req));
                }
                break;
        default:
                /*
                 * In case of bogus fw or device, we could end up having
                 * an unaligned partial completion. Check this here and force
                 * alignment.
                 */
                resid = scsi_get_resid(SCpnt);
                if (resid & (sector_size - 1)) {
                        sd_printk(KERN_INFO, sdkp,
                                "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
                                resid, sector_size);
                        scsi_print_command(SCpnt);
                        resid = min(scsi_bufflen(SCpnt),
                                    round_up(resid, sector_size));
                        scsi_set_resid(SCpnt, resid);
                }
        }

        if (result) {
                sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
        sdkp->medium_access_timed_out = 0;

        if (!scsi_status_is_check_condition(result) &&
            (!sense_valid || sense_deferred))
                goto out;

        switch (sshdr.sense_key) {
        case HARDWARE_ERROR:
        case MEDIUM_ERROR:
                good_bytes = sd_completed_bytes(SCpnt);
                break;
        case RECOVERED_ERROR:
                good_bytes = scsi_bufflen(SCpnt);
                break;
        case NO_SENSE:
                /* This indicates a false check condition, so ignore it.  An
                 * unknown amount of data was transferred so treat it as an
                 * error.
                 */
                SCpnt->result = 0;
                memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
                break;
        case ABORTED_COMMAND:
                if (sshdr.asc == 0x10)  /* DIF: Target detected corruption */
                        good_bytes = sd_completed_bytes(SCpnt);
                break;
        case ILLEGAL_REQUEST:
                switch (sshdr.asc) {
                case 0x10:      /* DIX: Host detected corruption */
                        good_bytes = sd_completed_bytes(SCpnt);
                        break;
                case 0x20:      /* INVALID COMMAND OPCODE */
                case 0x24:      /* INVALID FIELD IN CDB */
                        switch (SCpnt->cmnd[0]) {
                        case UNMAP:
                                sd_config_discard(sdkp, SD_LBP_DISABLE);
                                break;
                        case WRITE_SAME_16:
                        case WRITE_SAME:
                                if (SCpnt->cmnd[1] & 8) { /* UNMAP */
                                        sd_config_discard(sdkp, SD_LBP_DISABLE);
                                } else {
                                        sdkp->device->no_write_same = 1;
                                        sd_config_write_same(sdkp);
                                        req->rq_flags |= RQF_QUIET;
                                }
                                break;
                        }
                }
                break;
        default:
                break;
        }

 out:
        if (sd_is_zoned(sdkp))
                good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);

        SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
                                           "sd_done: completed %d of %d bytes\n",
                                           good_bytes, scsi_bufflen(SCpnt)));

        return good_bytes;
}
/*
 * spinup disk - called only in sd_revalidate_disk()
 */
static void
sd_spinup_disk(struct scsi_disk *sdkp)
{
        unsigned char cmd[10];
        unsigned long spintime_expire = 0;
        int retries, spintime;
        unsigned int the_result;
        struct scsi_sense_hdr sshdr;
        const struct scsi_exec_args exec_args = {
                .sshdr = &sshdr,
        };
        int sense_valid = 0;

        spintime = 0;

        /* Spin up drives, as required.  Only do this at boot time */
        /* Spinup needs to be done for module loads too. */
        do {
                retries = 0;

                do {
                        bool media_was_present = sdkp->media_present;

                        cmd[0] = TEST_UNIT_READY;
                        memset((void *) &cmd[1], 0, 9);

                        the_result = scsi_execute_cmd(sdkp->device, cmd,
                                                      REQ_OP_DRV_IN, NULL, 0,
                                                      SD_TIMEOUT,
                                                      sdkp->max_retries,
                                                      &exec_args);

                        /*
                         * If the drive has indicated to us that it
                         * doesn't have any media in it, don't bother
                         * with any more polling.
                         */
                        if (media_not_present(sdkp, &sshdr)) {
                                if (media_was_present)
                                        sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
                                return;
                        }

                        if (the_result)
                                sense_valid = scsi_sense_valid(&sshdr);
                        retries++;
                } while (retries < 3 &&
                         (!scsi_status_is_good(the_result) ||
                          (scsi_status_is_check_condition(the_result) &&
                          sense_valid && sshdr.sense_key == UNIT_ATTENTION)));

                if (!scsi_status_is_check_condition(the_result)) {
                        /* no sense, TUR either succeeded or failed
                         * with a status error */
                        if (!spintime && !scsi_status_is_good(the_result)) {
                                sd_print_result(sdkp, "Test Unit Ready failed",
                                                the_result);
                        }
                        break;
                }

                /*
                 * The device does not want the automatic start to be issued.
                 */
                if (sdkp->device->no_start_on_add)
                        break;

                if (sense_valid && sshdr.sense_key == NOT_READY) {
                        if (sshdr.asc == 4 && sshdr.ascq == 3)
                                break;  /* manual intervention required */
                        if (sshdr.asc == 4 && sshdr.ascq == 0xb)
                                break;  /* standby */
                        if (sshdr.asc == 4 && sshdr.ascq == 0xc)
                                break;  /* unavailable */
                        if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
                                break;  /* sanitize in progress */
                        /*
                         * Issue command to spin up drive when not ready
                         */
                        if (!spintime) {
                                sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
                                cmd[0] = START_STOP;
                                cmd[1] = 1;     /* Return immediately */
                                memset((void *) &cmd[2], 0, 8);
                                cmd[4] = 1;     /* Start spin cycle */
                                if (sdkp->device->start_stop_pwr_cond)
                                        cmd[4] |= 1 << 4;
                                scsi_execute_cmd(sdkp->device, cmd,
                                                 REQ_OP_DRV_IN, NULL, 0,
                                                 SD_TIMEOUT, sdkp->max_retries,
                                                 &exec_args);
                                spintime_expire = jiffies + 100 * HZ;
                                spintime = 1;
                        }
                        /* Wait 1 second for next try */
                        msleep(1000);
                        printk(KERN_CONT ".");

                /*
                 * Wait for USB flash devices with slow firmware.
                 * Yes, this sense key/ASC combination shouldn't
                 * occur here.  It's characteristic of these devices.
                 */
                } else if (sense_valid &&
                                sshdr.sense_key == UNIT_ATTENTION &&
                                sshdr.asc == 0x28) {
                        if (!spintime) {
                                spintime_expire = jiffies + 5 * HZ;
                                spintime = 1;
                        }
                        /* Wait 1 second for next try */
                        msleep(1000);
                } else {
                        /* we don't understand the sense code, so it's
                         * probably pointless to loop */
                        if (!spintime) {
                                sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
                                sd_print_sense_hdr(sdkp, &sshdr);
                        }
                        break;
                }

        } while (spintime && time_before_eq(jiffies, spintime_expire));

        if (spintime) {
                if (scsi_status_is_good(the_result))
                        printk(KERN_CONT "ready\n");
                else
                        printk(KERN_CONT "not responding...\n");
        }
}
/*
 * Determine whether disk supports Data Integrity Field.
 */
static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
        struct scsi_device *sdp = sdkp->device;
        u8 type;

        if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
                sdkp->protection_type = 0;
                return 0;
        }

        type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */

        if (type > T10_PI_TYPE3_PROTECTION) {
                sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
                          " protection type %u. Disabling disk!\n",
                          type);
                sdkp->protection_type = 0;
                return -ENODEV;
        }

        sdkp->protection_type = type;

        return 0;
}

static void sd_config_protection(struct scsi_disk *sdkp)
{
        struct scsi_device *sdp = sdkp->device;

        sd_dif_config_host(sdkp);

        if (!sdkp->protection_type)
                return;

        if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
                sd_first_printk(KERN_NOTICE, sdkp,
                                "Disabling DIF Type %u protection\n",
                                sdkp->protection_type);
                sdkp->protection_type = 0;
        }

        sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
                        sdkp->protection_type);
}

static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
                        struct scsi_sense_hdr *sshdr, int sense_valid,
                        int the_result)
{
        if (sense_valid)
                sd_print_sense_hdr(sdkp, sshdr);
        else
                sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");

        /*
         * Set dirty bit for removable devices if not ready -
         * sometimes drives will not report this properly.
         */
        if (sdp->removable &&
            sense_valid && sshdr->sense_key == NOT_READY)
                set_media_not_present(sdkp);

        /*
         * We used to set media_present to 0 here to indicate no media
         * in the drive, but some drives fail read capacity even with
         * media present, so we can't do that.
         */
        sdkp->capacity = 0; /* unknown mapped to zero - as usual */
}
2296 #if RC16_LEN > SD_BUF_SIZE
2297 #error RC16_LEN must not be more than SD_BUF_SIZE
2300 #define READ_CAPACITY_RETRIES_ON_RESET 10
2302 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2303 unsigned char *buffer)
2305 unsigned char cmd[16];
2306 struct scsi_sense_hdr sshdr;
2307 const struct scsi_exec_args exec_args = {
2310 int sense_valid = 0;
2312 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2313 unsigned int alignment;
2314 unsigned long long lba;
2315 unsigned sector_size;
2317 if (sdp->no_read_capacity_16)
2322 cmd[0] = SERVICE_ACTION_IN_16;
2323 cmd[1] = SAI_READ_CAPACITY_16;
2325 memset(buffer, 0, RC16_LEN);
2327 the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN,
2328 buffer, RC16_LEN, SD_TIMEOUT,
2329 sdkp->max_retries, &exec_args);
2331 if (media_not_present(sdkp, &sshdr))
2334 if (the_result > 0) {
2335 sense_valid = scsi_sense_valid(&sshdr);
2337 sshdr.sense_key == ILLEGAL_REQUEST &&
2338 (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
2340 /* Invalid Command Operation Code or
2341 * Invalid Field in CDB, just retry
2342 * silently with RC10 */
2345 sshdr.sense_key == UNIT_ATTENTION &&
2346 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2347 /* Device reset might occur several times,
2348 * give it one more chance */
2349 if (--reset_retries > 0)
2354 } while (the_result && retries);
2357 sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
2358 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2362 sector_size = get_unaligned_be32(&buffer[8]);
2363 lba = get_unaligned_be64(&buffer[0]);
2365 if (sd_read_protection_type(sdkp, buffer) < 0) {
2370 /* Logical blocks per physical block exponent */
2371 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
2374 sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
2376 /* Lowest aligned logical block */
2377 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
2378 blk_queue_alignment_offset(sdp->request_queue, alignment);
2379 if (alignment && sdkp->first_scan)
2380 sd_printk(KERN_NOTICE, sdkp,
2381 "physical block alignment offset: %u\n", alignment);
2383 if (buffer[14] & 0x80) { /* LBPME */
2386 if (buffer[14] & 0x40) /* LBPRZ */
2389 sd_config_discard(sdkp, SD_LBP_WS16);
2392 sdkp->capacity = lba + 1;
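/*
 * The returned LBA in bytes 0-7 addresses the *last* logical block,
 * so the block count is lba + 1. Byte 13 holds the logical-blocks-
 * per-physical-block exponent in its low nibble (e.g. 3 -> 8 blocks,
 * i.e. 4096-byte physical sectors over 512-byte logical ones), and
 * bytes 14-15 carry LBPME, LBPRZ and the lowest aligned LBA.
 */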
2396 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2397 unsigned char *buffer)
2399 unsigned char cmd[16];
2400 struct scsi_sense_hdr sshdr;
2401 const struct scsi_exec_args exec_args = {
2404 int sense_valid = 0;
2406 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2408 unsigned sector_size;
2411 cmd[0] = READ_CAPACITY;
2412 memset(&cmd[1], 0, 9);
2413 memset(buffer, 0, 8);
2415 the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
2416 8, SD_TIMEOUT, sdkp->max_retries,
2419 if (media_not_present(sdkp, &sshdr))
2422 if (the_result > 0) {
2423 sense_valid = scsi_sense_valid(&sshdr);
2425 sshdr.sense_key == UNIT_ATTENTION &&
2426 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2427 /* Device reset might occur several times,
2428 * give it one more chance */
2429 if (--reset_retries > 0)
2434 } while (the_result && retries);
2437 sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
2438 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2442 sector_size = get_unaligned_be32(&buffer[4]);
2443 lba = get_unaligned_be32(&buffer[0]);
2445 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
2446 /* Some buggy (usb cardreader) devices return an lba of
2447 0xffffffff when they want to report a size of 0 (with
2448 which they really mean no media is present) */
2450 sdkp->physical_block_size = sector_size;
2454 sdkp->capacity = lba + 1;
2455 sdkp->physical_block_size = sector_size;
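/*
 * Note that READ CAPACITY(10) returns only a 32-bit last-LBA, so it
 * tops out at 2^32 blocks: with 512-byte sectors that is
 * 2^32 * 512 bytes = 2 TiB. Larger devices need the READ
 * CAPACITY(16) path above.
 */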
2459 static int sd_try_rc16_first(struct scsi_device *sdp)
2461 if (sdp->host->max_cmd_len < 16)
2463 if (sdp->try_rc_10_first)
2465 if (sdp->scsi_level > SCSI_SPC_2)
2467 if (scsi_device_protection(sdp))
2473 * read disk capacity
2476 sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
2479 struct scsi_device *sdp = sdkp->device;
2481 if (sd_try_rc16_first(sdp)) {
2482 sector_size = read_capacity_16(sdkp, sdp, buffer);
2483 if (sector_size == -EOVERFLOW)
2485 if (sector_size == -ENODEV)
2487 if (sector_size < 0)
2488 sector_size = read_capacity_10(sdkp, sdp, buffer);
2489 if (sector_size < 0)
2492 sector_size = read_capacity_10(sdkp, sdp, buffer);
2493 if (sector_size == -EOVERFLOW)
2495 if (sector_size < 0)
2497 if ((sizeof(sdkp->capacity) > 4) &&
2498 (sdkp->capacity > 0xffffffffULL)) {
2499 int old_sector_size = sector_size;
2500 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
2501 "Trying to use READ CAPACITY(16).\n");
2502 sector_size = read_capacity_16(sdkp, sdp, buffer);
2503 if (sector_size < 0) {
2504 sd_printk(KERN_NOTICE, sdkp,
2505 "Using 0xffffffff as device size\n");
2506 sdkp->capacity = 1 + (sector_t) 0xffffffff;
2507 sector_size = old_sector_size;
2510 /* Remember that READ CAPACITY(16) succeeded */
2511 sdp->try_rc_10_first = 0;
2515 /* Some devices are known to return the total number of blocks,
2516 * not the highest block number. Some devices have versions
2517 * which do this and others which do not. Some devices we might
2518 * suspect of doing this but we don't know for certain.
2520 * If we know the reported capacity is wrong, decrement it. If
2521 * we can only guess, then assume the number of blocks is even
2522 (usually true but not always) and err on the side of lowering the capacity.
2525 if (sdp->fix_capacity ||
2526 (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
2527 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
2528 "from its reported value: %llu\n",
2529 (unsigned long long) sdkp->capacity);
2534 if (sector_size == 0) {
2536 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
2540 if (sector_size != 512 &&
2541 sector_size != 1024 &&
2542 sector_size != 2048 &&
2543 sector_size != 4096) {
2544 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
2547 * The user might want to re-format the drive with
2548 * a supported sector size. Once this happens, it
2549 * would be relatively trivial to set the thing up.
2550 * For this reason, we leave the thing in the table.
2554 * set a bogus sector size so the normal read/write
2555 * logic in the block layer will eventually refuse any
2556 * request on this device without tripping over power
2557 * of two sector size assumptions
2561 blk_queue_logical_block_size(sdp->request_queue, sector_size);
2562 blk_queue_physical_block_size(sdp->request_queue,
2563 sdkp->physical_block_size);
2564 sdkp->device->sector_size = sector_size;
2566 if (sdkp->capacity > 0xffffffff)
2567 sdp->use_16_for_rw = 1;
2572 * Print disk capacity
2575 sd_print_capacity(struct scsi_disk *sdkp,
2576 sector_t old_capacity)
2578 int sector_size = sdkp->device->sector_size;
2579 char cap_str_2[10], cap_str_10[10];
2581 if (!sdkp->first_scan && old_capacity == sdkp->capacity)
2584 string_get_size(sdkp->capacity, sector_size,
2585 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
2586 string_get_size(sdkp->capacity, sector_size,
2587 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
2589 sd_printk(KERN_NOTICE, sdkp,
2590 "%llu %d-byte logical blocks: (%s/%s)\n",
2591 (unsigned long long)sdkp->capacity,
2592 sector_size, cap_str_10, cap_str_2);
2594 if (sdkp->physical_block_size != sector_size)
2595 sd_printk(KERN_NOTICE, sdkp,
2596 "%u-byte physical blocks\n",
2597 sdkp->physical_block_size);
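/*
 * Sample output, assuming a 2 TB drive with 4 KiB physical sectors
 * (figures are illustrative only):
 *
 *   sd 0:0:0:0: [sda] 3907029168 512-byte logical blocks: (2.00 TB/1.82 TiB)
 *   sd 0:0:0:0: [sda] 4096-byte physical blocks
 */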
2600 /* called with buffer of length 512 */
2602 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
2603 unsigned char *buffer, int len, struct scsi_mode_data *data,
2604 struct scsi_sense_hdr *sshdr)
2607 * If we must use MODE SENSE(10), make sure that the buffer length
2608 * is at least 8 bytes so that the mode sense header fits.
2610 if (sdkp->device->use_10_for_ms && len < 8)
2613 return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
2614 SD_TIMEOUT, sdkp->max_retries, data,
2619 * read the write-protect setting, if possible - called only in sd_revalidate_disk()
2620 * called with buffer of length SD_BUF_SIZE
2623 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2626 struct scsi_device *sdp = sdkp->device;
2627 struct scsi_mode_data data;
2628 int old_wp = sdkp->write_prot;
2630 set_disk_ro(sdkp->disk, 0);
2631 if (sdp->skip_ms_page_3f) {
2632 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
2636 if (sdp->use_192_bytes_for_3f) {
2637 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
2640 * First attempt: ask for all pages (0x3F), but only 4 bytes.
2641 * We have to start carefully: some devices hang if we ask
2642 * for more than is available.
2644 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
2647 * Second attempt: ask for page 0. When only page 0 is
2648 * implemented, a request for page 3F may return Sense Key
2649 * 5: Illegal Request, Sense Code 24: Invalid field in CDB.
2653 res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
2656 * Third attempt: ask for 255 bytes, as we did earlier.
2659 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
2664 sd_first_printk(KERN_WARNING, sdkp,
2665 "Test WP failed, assume Write Enabled\n");
2667 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
2668 set_disk_ro(sdkp->disk, sdkp->write_prot);
2669 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2670 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2671 sdkp->write_prot ? "on" : "off");
2672 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
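/*
 * The test above relies on the mode parameter header: for
 * direct-access devices the device-specific parameter byte carries
 * WP (write protect) in bit 7 and DPOFUA in bit 4, the latter being
 * picked up by sd_read_cache_type() below.
 */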
2678 * sd_read_cache_type - called only from sd_revalidate_disk()
2679 * called with buffer of length SD_BUF_SIZE
2682 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2685 struct scsi_device *sdp = sdkp->device;
2690 struct scsi_mode_data data;
2691 struct scsi_sense_hdr sshdr;
2692 int old_wce = sdkp->WCE;
2693 int old_rcd = sdkp->RCD;
2694 int old_dpofua = sdkp->DPOFUA;
2697 if (sdkp->cache_override)
2701 if (sdp->skip_ms_page_8) {
2702 if (sdp->type == TYPE_RBC)
2705 if (sdp->skip_ms_page_3f)
2708 if (sdp->use_192_bytes_for_3f)
2712 } else if (sdp->type == TYPE_RBC) {
2720 /* cautiously ask */
2721 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
2727 if (!data.header_length) {
2730 sd_first_printk(KERN_ERR, sdkp,
2731 "Missing header in MODE_SENSE response\n");
2734 /* that went OK, now ask for the proper length */
2738 * We're only interested in the first three bytes, actually.
2739 * But the data cache page is defined for the first 20.
2743 else if (len > SD_BUF_SIZE) {
2744 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
2745 "data from %d to %d bytes\n", len, SD_BUF_SIZE);
2748 if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
2752 if (len > first_len)
2753 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
2757 int offset = data.header_length + data.block_descriptor_length;
2759 while (offset < len) {
2760 u8 page_code = buffer[offset] & 0x3F;
2761 u8 spf = buffer[offset] & 0x40;
2763 if (page_code == 8 || page_code == 6) {
2764 /* We're interested only in the first 3 bytes.
2766 if (len - offset <= 2) {
2767 sd_first_printk(KERN_ERR, sdkp,
2768 "Incomplete mode parameter "
2772 modepage = page_code;
2776 /* Go to the next page */
2777 if (spf && len - offset > 3)
2778 offset += 4 + (buffer[offset+2] << 8) +
2780 else if (!spf && len - offset > 1)
2781 offset += 2 + buffer[offset+1];
2783 sd_first_printk(KERN_ERR, sdkp,
2785 "parameter data\n");
2791 sd_first_printk(KERN_WARNING, sdkp,
2792 "No Caching mode page found\n");
2796 if (modepage == 8) {
2797 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2798 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
2800 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
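/*
 * Bit positions used above: in the caching mode page (page 8),
 * byte 2 has WCE in bit 2 and RCD in bit 0. The other branch handles
 * the RBC device parameters page (page 6), where byte 2 bit 0 is
 * (assumed here, per RBC) a write-cache-disable style flag, hence
 * the inverted test.
 */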
2804 sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
2805 if (sdp->broken_fua) {
2806 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2808 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
2809 !sdkp->device->use_16_for_rw) {
2810 sd_first_printk(KERN_NOTICE, sdkp,
2811 "Uses READ/WRITE(6), disabling FUA\n");
2815 /* No cache flush allowed for write protected devices */
2816 if (sdkp->WCE && sdkp->write_prot)
2819 if (sdkp->first_scan || old_wce != sdkp->WCE ||
2820 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
2821 sd_printk(KERN_NOTICE, sdkp,
2822 "Write cache: %s, read cache: %s, %s\n",
2823 sdkp->WCE ? "enabled" : "disabled",
2824 sdkp->RCD ? "disabled" : "enabled",
2825 sdkp->DPOFUA ? "supports DPO and FUA"
2826 : "doesn't support DPO or FUA");
2832 if (scsi_sense_valid(&sshdr) &&
2833 sshdr.sense_key == ILLEGAL_REQUEST &&
2834 sshdr.asc == 0x24 && sshdr.ascq == 0x0)
2835 /* Invalid field in CDB */
2836 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
2838 sd_first_printk(KERN_ERR, sdkp,
2839 "Asking for cache data failed\n");
2842 if (sdp->wce_default_on) {
2843 sd_first_printk(KERN_NOTICE, sdkp,
2844 "Assuming drive cache: write back\n");
2847 sd_first_printk(KERN_WARNING, sdkp,
2848 "Assuming drive cache: write through\n");
2856 * The ATO bit indicates whether the DIF application tag is available
2857 * for use by the operating system.
2859 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
2862 struct scsi_device *sdp = sdkp->device;
2863 struct scsi_mode_data data;
2864 struct scsi_sense_hdr sshdr;
2866 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
2869 if (sdkp->protection_type == 0)
2872 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
2873 sdkp->max_retries, &data, &sshdr);
2875 if (res < 0 || !data.header_length ||
2877 sd_first_printk(KERN_WARNING, sdkp,
2878 "getting Control mode page failed, assume no ATO\n");
2880 if (scsi_sense_valid(&sshdr))
2881 sd_print_sense_hdr(sdkp, &sshdr);
2886 offset = data.header_length + data.block_descriptor_length;
2888 if ((buffer[offset] & 0x3f) != 0x0a) {
2889 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
2893 if ((buffer[offset + 5] & 0x80) == 0)
2902 * sd_read_block_limits - Query disk device for preferred I/O sizes.
2903 * @sdkp: disk to query
2905 static void sd_read_block_limits(struct scsi_disk *sdkp)
2907 struct scsi_vpd *vpd;
2911 vpd = rcu_dereference(sdkp->device->vpd_pgb0);
2912 if (!vpd || vpd->len < 16)
2915 sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]);
2916 sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
2917 sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
2919 if (vpd->len >= 64) {
2920 unsigned int lba_count, desc_count;
2922 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
2927 lba_count = get_unaligned_be32(&vpd->data[20]);
2928 desc_count = get_unaligned_be32(&vpd->data[24]);
2930 if (lba_count && desc_count)
2931 sdkp->max_unmap_blocks = lba_count;
2933 sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
2935 if (vpd->data[32] & 0x80)
2936 sdkp->unmap_alignment =
2937 get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
2939 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
2941 if (sdkp->max_unmap_blocks)
2942 sd_config_discard(sdkp, SD_LBP_UNMAP);
2944 sd_config_discard(sdkp, SD_LBP_WS16);
2946 } else { /* LBP VPD page tells us what to use */
2947 if (sdkp->lbpu && sdkp->max_unmap_blocks)
2948 sd_config_discard(sdkp, SD_LBP_UNMAP);
2949 else if (sdkp->lbpws)
2950 sd_config_discard(sdkp, SD_LBP_WS16);
2951 else if (sdkp->lbpws10)
2952 sd_config_discard(sdkp, SD_LBP_WS10);
2954 sd_config_discard(sdkp, SD_LBP_DISABLE);
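/*
 * The offsets above follow the Block Limits VPD page (B0h) layout
 * from SBC-3: bytes 6-7 optimal transfer length granularity (our
 * preferred minimum), 8-11 maximum transfer length, 12-15 optimal
 * transfer length, 20-23 maximum UNMAP LBA count, 24-27 maximum
 * UNMAP descriptor count, 28-31 optimal unmap granularity, 32-35
 * unmap granularity alignment (bit 7 of byte 32 is UGAVALID), and
 * 36-43 maximum WRITE SAME length.
 */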
2963 * sd_read_block_characteristics - Query block device characteristics
2964 * @sdkp: disk to query
2966 static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2968 struct request_queue *q = sdkp->disk->queue;
2969 struct scsi_vpd *vpd;
2974 vpd = rcu_dereference(sdkp->device->vpd_pgb1);
2976 if (!vpd || vpd->len < 8) {
2981 rot = get_unaligned_be16(&vpd->data[4]);
2982 zoned = (vpd->data[8] >> 4) & 3;
2986 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2987 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2990 if (sdkp->device->type == TYPE_ZBC) {
2992 * Host-managed: Per ZBC and ZAC specifications, writes in
2993 * sequential write required zones of host-managed devices must
2994 * be aligned to the device physical block size.
2996 disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
2997 blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
2999 sdkp->zoned = zoned;
3000 if (sdkp->zoned == 1) {
3002 disk_set_zoned(sdkp->disk, BLK_ZONED_HA);
3004 /* Regular disk or drive-managed disk */
3005 disk_set_zoned(sdkp->disk, BLK_ZONED_NONE);
3009 if (!sdkp->first_scan)
3012 if (blk_queue_is_zoned(q)) {
3013 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
3014 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
3016 if (sdkp->zoned == 1)
3017 sd_printk(KERN_NOTICE, sdkp,
3018 "Host-aware SMR disk used as regular disk\n");
3019 else if (sdkp->zoned == 2)
3020 sd_printk(KERN_NOTICE, sdkp,
3021 "Drive-managed SMR disk\n");
3026 * sd_read_block_provisioning - Query provisioning VPD page
3027 * @sdkp: disk to query
3029 static void sd_read_block_provisioning(struct scsi_disk *sdkp)
3031 struct scsi_vpd *vpd;
3033 if (sdkp->lbpme == 0)
3037 vpd = rcu_dereference(sdkp->device->vpd_pgb2);
3039 if (!vpd || vpd->len < 8) {
3045 sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */
3046 sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
3047 sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
3051 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
3053 struct scsi_device *sdev = sdkp->device;
3055 if (sdev->host->no_write_same) {
3056 sdev->no_write_same = 1;
3061 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
3062 struct scsi_vpd *vpd;
3064 sdev->no_report_opcodes = 1;
3066 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
3067 * CODES is unsupported and the device has an ATA
3068 * Information VPD page (SAT).
3071 vpd = rcu_dereference(sdev->vpd_pg89);
3073 sdev->no_write_same = 1;
3077 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
3080 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
3084 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
3086 struct scsi_device *sdev = sdkp->device;
3088 if (!sdev->security_supported)
3091 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3092 SECURITY_PROTOCOL_IN) == 1 &&
3093 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3094 SECURITY_PROTOCOL_OUT) == 1)
3098 static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
3100 return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
3104 * sd_read_cpr - Query concurrent positioning ranges
3105 * @sdkp: disk to query
3107 static void sd_read_cpr(struct scsi_disk *sdkp)
3109 struct blk_independent_access_ranges *iars = NULL;
3110 unsigned char *buffer = NULL;
3111 unsigned int nr_cpr = 0;
3112 int i, vpd_len, buf_len = SD_BUF_SIZE;
3116 * We need to have the capacity set first for the block layer to be
3117 * able to check the ranges.
3119 if (sdkp->first_scan)
3122 if (!sdkp->capacity)
3126 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
3127 * leading to a maximum page size of 64 + 256*32 bytes.
3129 buf_len = 64 + 256*32;
3130 buffer = kmalloc(buf_len, GFP_KERNEL);
3131 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
3134 /* We must have at least a 64B header and one 32B range descriptor */
3135 vpd_len = get_unaligned_be16(&buffer[2]) + 4;
3136 if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
3137 sd_printk(KERN_ERR, sdkp,
3138 "Invalid Concurrent Positioning Ranges VPD page\n");
3142 nr_cpr = (vpd_len - 64) / 32;
3148 iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
3155 for (i = 0; i < nr_cpr; i++, desc += 32) {
3157 sd_printk(KERN_ERR, sdkp,
3158 "Invalid Concurrent Positioning Range number\n");
3163 iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
3164 iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
3168 disk_set_independent_access_ranges(sdkp->disk, iars);
3169 if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
3170 sd_printk(KERN_NOTICE, sdkp,
3171 "%u concurrent positioning ranges\n", nr_cpr);
3172 sdkp->nr_actuators = nr_cpr;
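/*
 * Size-check arithmetic: the page is a 64-byte header plus one
 * 32-byte descriptor per range, so with the maximum of 256 ranges
 * buf_len = 64 + 256 * 32 = 8256 bytes, and any valid vpd_len is
 * 64 + n * 32, i.e. a multiple of 32 (hence the "& 31" test).
 */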
3178 static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp)
3180 struct scsi_device *sdp = sdkp->device;
3181 unsigned int min_xfer_bytes =
3182 logical_to_bytes(sdp, sdkp->min_xfer_blocks);
3184 if (sdkp->min_xfer_blocks == 0)
3187 if (min_xfer_bytes & (sdkp->physical_block_size - 1)) {
3188 sd_first_printk(KERN_WARNING, sdkp,
3189 "Preferred minimum I/O size %u bytes not a " \
3190 "multiple of physical block size (%u bytes)\n",
3191 min_xfer_bytes, sdkp->physical_block_size);
3192 sdkp->min_xfer_blocks = 0;
3196 sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n",
3202 * Determine the device's preferred I/O size for reads and writes
3203 * unless the reported value is unreasonably small, large, not a
3204 * multiple of the physical block size, or simply garbage.
3206 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3207 unsigned int dev_max)
3209 struct scsi_device *sdp = sdkp->device;
3210 unsigned int opt_xfer_bytes =
3211 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3212 unsigned int min_xfer_bytes =
3213 logical_to_bytes(sdp, sdkp->min_xfer_blocks);
3215 if (sdkp->opt_xfer_blocks == 0)
3218 if (sdkp->opt_xfer_blocks > dev_max) {
3219 sd_first_printk(KERN_WARNING, sdkp,
3220 "Optimal transfer size %u logical blocks " \
3221 "> dev_max (%u logical blocks)\n",
3222 sdkp->opt_xfer_blocks, dev_max);
3226 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
3227 sd_first_printk(KERN_WARNING, sdkp,
3228 "Optimal transfer size %u logical blocks " \
3229 "> sd driver limit (%u logical blocks)\n",
3230 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
3234 if (opt_xfer_bytes < PAGE_SIZE) {
3235 sd_first_printk(KERN_WARNING, sdkp,
3236 "Optimal transfer size %u bytes < " \
3237 "PAGE_SIZE (%u bytes)\n",
3238 opt_xfer_bytes, (unsigned int)PAGE_SIZE);
3242 if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) {
3243 sd_first_printk(KERN_WARNING, sdkp,
3244 "Optimal transfer size %u bytes not a " \
3245 "multiple of preferred minimum block " \
3246 "size (%u bytes)\n",
3247 opt_xfer_bytes, min_xfer_bytes);
3251 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
3252 sd_first_printk(KERN_WARNING, sdkp,
3253 "Optimal transfer size %u bytes not a " \
3254 "multiple of physical block size (%u bytes)\n",
3255 opt_xfer_bytes, sdkp->physical_block_size);
3259 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
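/*
 * Worked example (illustrative values): a drive with 512-byte
 * logical and 4096-byte physical blocks reporting an optimal
 * transfer length of 2048 blocks yields opt_xfer_bytes = 1 MiB,
 * which passes every check above: within dev_max and
 * SD_DEF_XFER_BLOCKS, at least PAGE_SIZE, and a multiple of both
 * the preferred minimum and the physical block size.
 */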
3265 * sd_revalidate_disk - called the first time a new disk is seen,
3266 * performs disk spin-up, read_capacity, etc.
3267 * @disk: struct gendisk we care about
3269 static int sd_revalidate_disk(struct gendisk *disk)
3271 struct scsi_disk *sdkp = scsi_disk(disk);
3272 struct scsi_device *sdp = sdkp->device;
3273 struct request_queue *q = sdkp->disk->queue;
3274 sector_t old_capacity = sdkp->capacity;
3275 unsigned char *buffer;
3276 unsigned int dev_max, rw_max;
3278 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
3279 "sd_revalidate_disk\n"));
3282 * If the device is offline, don't try to read capacity or any
3283 * of the other niceties.
3285 if (!scsi_device_online(sdp))
3288 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
3290 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
3291 "allocation failure.\n");
3295 sd_spinup_disk(sdkp);
3298 * Without media there is no reason to ask; moreover, some devices
3299 * react badly if we do.
3301 if (sdkp->media_present) {
3302 sd_read_capacity(sdkp, buffer);
3305 * set the default to rotational. All non-rotational devices
3306 * support the block characteristics VPD page, which will
3307 * cause this to be updated correctly, and any device that
3308 * doesn't support it should be treated as rotational.
3310 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
3311 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
3313 if (scsi_device_supports_vpd(sdp)) {
3314 sd_read_block_provisioning(sdkp);
3315 sd_read_block_limits(sdkp);
3316 sd_read_block_characteristics(sdkp);
3317 sd_zbc_read_zones(sdkp, buffer);
3321 sd_print_capacity(sdkp, old_capacity);
3323 sd_read_write_protect_flag(sdkp, buffer);
3324 sd_read_cache_type(sdkp, buffer);
3325 sd_read_app_tag_own(sdkp, buffer);
3326 sd_read_write_same(sdkp, buffer);
3327 sd_read_security(sdkp, buffer);
3328 sd_config_protection(sdkp);
3332 * We now have all cache related info, determine how we deal
3333 * with flush requests.
3335 sd_set_flush_flag(sdkp);
3337 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
3338 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
3340 /* Some devices report a maximum block count for READ/WRITE requests. */
3341 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
3342 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3344 if (sd_validate_min_xfer_size(sdkp))
3345 blk_queue_io_min(sdkp->disk->queue,
3346 logical_to_bytes(sdp, sdkp->min_xfer_blocks));
3348 blk_queue_io_min(sdkp->disk->queue, 0);
3350 if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3351 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3352 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
3354 q->limits.io_opt = 0;
3355 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3356 (sector_t)BLK_DEF_MAX_SECTORS);
3360 * Limit default to SCSI host optimal sector limit if set. There may be
3361 * an impact on performance when the size of a request exceeds this host limit.
3364 rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
3366 /* Do not exceed controller limit */
3367 rw_max = min(rw_max, queue_max_hw_sectors(q));
3370 * Only update max_sectors if previously unset or if the current value
3371 * exceeds the capabilities of the hardware.
3373 if (sdkp->first_scan ||
3374 q->limits.max_sectors > q->limits.max_dev_sectors ||
3375 q->limits.max_sectors > q->limits.max_hw_sectors)
3376 q->limits.max_sectors = rw_max;
3378 sdkp->first_scan = 0;
3380 set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
3381 sd_config_write_same(sdkp);
3385 * For a zoned drive, revalidating the zones can be done only once
3386 * the gendisk capacity is set. So if this fails, set the gendisk capacity back to 0.
3389 if (sd_zbc_revalidate_zones(sdkp))
3390 set_capacity_and_notify(disk, 0);
3397 * sd_unlock_native_capacity - unlock native capacity
3398 * @disk: struct gendisk to set capacity for
3400 * Block layer calls this function if it detects that partitions
3401 * on @disk reach beyond the end of the device. If the SCSI host
3402 * implements the ->unlock_native_capacity() method, it's invoked to
3403 * give it a chance to adjust the device capacity.
3406 * Defined by block layer. Might sleep.
3408 static void sd_unlock_native_capacity(struct gendisk *disk)
3410 struct scsi_device *sdev = scsi_disk(disk)->device;
3412 if (sdev->host->hostt->unlock_native_capacity)
3413 sdev->host->hostt->unlock_native_capacity(sdev);
3417 * sd_format_disk_name - format disk name
3418 * @prefix: name prefix - ie. "sd" for SCSI disks
3419 * @index: index of the disk to format name for
3420 * @buf: output buffer
3421 * @buflen: length of the output buffer
3423 * SCSI disk names start at sda. The 26th device is sdz and the
3424 * 27th is sdaa. The last two-letter suffix is sdzz, which is
3425 * followed by sdaaa.
3427 * This is basically base-26 counting with one extra 'nil' entry
3428 * at the beginning of each digit position after the first, and
3429 * can be computed like an ordinary base-26 conversion with the
3430 * index decremented by 1 after each digit is extracted.
3436 * 0 on success, -errno on failure.
3438 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
3440 const int base = 'z' - 'a' + 1;
3441 char *begin = buf + strlen(prefix);
3442 char *end = buf + buflen;
3452 *--p = 'a' + (index % unit);
3453 index = (index / unit) - 1;
3454 } while (index >= 0);
3456 memmove(begin, p, end - p);
3457 memcpy(buf, prefix, strlen(prefix));
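/*
 * Worked examples of the mapping (indices are zero-based, as in the
 * loop above): index 0 -> "sda", 25 -> "sdz", 26 -> "sdaa",
 * 701 -> "sdzz", 702 -> "sdaaa".
 */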
3463 * sd_probe - called during driver initialization and whenever a
3464 * new scsi device is attached to the system. It is called once
3465 * for each scsi device (not just disks) present.
3466 * @dev: pointer to device object
3468 * Returns 0 if successful (or if not interested in this scsi
3469 * device, e.g. a scanner); 1 when there is an error.
3471 * Note: this function is invoked from the scsi mid-level.
3472 * This function sets up the mapping between a given
3473 * <host,channel,id,lun> (found in sdp) and new device name
3474 * (e.g. /dev/sda). More precisely it is the block device major
3475 * and minor number that is chosen here.
3477 * Assume sd_probe is not re-entrant (for the time being).
3478 * Also think about sd_probe() and sd_remove() running coincidentally.
3480 static int sd_probe(struct device *dev)
3482 struct scsi_device *sdp = to_scsi_device(dev);
3483 struct scsi_disk *sdkp;
3488 scsi_autopm_get_device(sdp);
3490 if (sdp->type != TYPE_DISK &&
3491 sdp->type != TYPE_ZBC &&
3492 sdp->type != TYPE_MOD &&
3493 sdp->type != TYPE_RBC)
3496 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
3497 sdev_printk(KERN_WARNING, sdp,
3498 "Unsupported ZBC host-managed device.\n");
3502 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
3506 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
3510 gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
3511 &sd_bio_compl_lkclass);
3515 index = ida_alloc(&sd_index_ida, GFP_KERNEL);
3517 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
3521 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
3523 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
3524 goto out_free_index;
3529 sdkp->index = index;
3530 sdkp->max_retries = SD_MAX_RETRIES;
3531 atomic_set(&sdkp->openers, 0);
3532 atomic_set(&sdkp->device->ioerr_cnt, 0);
3534 if (!sdp->request_queue->rq_timeout) {
3535 if (sdp->type != TYPE_MOD)
3536 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
3538 blk_queue_rq_timeout(sdp->request_queue,
3542 device_initialize(&sdkp->disk_dev);
3543 sdkp->disk_dev.parent = get_device(dev);
3544 sdkp->disk_dev.class = &sd_disk_class;
3545 dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));
3547 error = device_add(&sdkp->disk_dev);
3549 put_device(&sdkp->disk_dev);
3553 dev_set_drvdata(dev, sdkp);
3555 gd->major = sd_major((index & 0xf0) >> 4);
3556 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
3557 gd->minors = SD_MINORS;
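/*
 * Example of the major/minor math above, assuming SD_MINORS is 16:
 * index 17 (0x11) names the disk "sdr", selects major nibble 1
 * (sd_major(1) == SCSI_DISK1_MAJOR == 65) and yields first_minor
 * (1 << 4) | 0 = 16, so sdr spans dev 65:16 through 65:31.
 */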
3559 gd->fops = &sd_fops;
3560 gd->private_data = sdkp;
3562 /* defaults, until the device tells us otherwise */
3563 sdp->sector_size = 512;
3565 sdkp->media_present = 1;
3566 sdkp->write_prot = 0;
3567 sdkp->cache_override = 0;
3571 sdkp->first_scan = 1;
3572 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
3574 sd_revalidate_disk(gd);
3576 if (sdp->removable) {
3577 gd->flags |= GENHD_FL_REMOVABLE;
3578 gd->events |= DISK_EVENT_MEDIA_CHANGE;
3579 gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
3582 blk_pm_runtime_init(sdp->request_queue, dev);
3583 if (sdp->rpm_autosuspend) {
3584 pm_runtime_set_autosuspend_delay(dev,
3585 sdp->host->hostt->rpm_autosuspend_delay);
3588 error = device_add_disk(dev, gd, NULL);
3590 put_device(&sdkp->disk_dev);
3595 if (sdkp->security) {
3596 sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
3598 sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
3601 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
3602 sdp->removable ? "removable " : "");
3603 scsi_autopm_put_device(sdp);
3608 ida_free(&sd_index_ida, index);
3614 scsi_autopm_put_device(sdp);
3619 * sd_remove - called whenever a scsi disk (previously recognized by
3620 * sd_probe) is detached from the system. It is called (potentially
3621 * multiple times) during sd module unload.
3622 * @dev: pointer to device object
3624 * Note: this function is invoked from the scsi mid-level.
3625 * This function potentially frees up a device name (e.g. /dev/sdc)
3626 * that could be re-used by a subsequent sd_probe().
3627 * This function is not called when the built-in sd driver is "exit-ed".
3629 static int sd_remove(struct device *dev)
3631 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3633 scsi_autopm_get_device(sdkp->device);
3635 device_del(&sdkp->disk_dev);
3636 del_gendisk(sdkp->disk);
3639 put_disk(sdkp->disk);
3643 static void scsi_disk_release(struct device *dev)
3645 struct scsi_disk *sdkp = to_scsi_disk(dev);
3647 ida_free(&sd_index_ida, sdkp->index);
3648 sd_zbc_free_zone_info(sdkp);
3649 put_device(&sdkp->device->sdev_gendev);
3650 free_opal_dev(sdkp->opal_dev);
3655 static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
3657 unsigned char cmd[6] = { START_STOP }; /* START_VALID */
3658 struct scsi_sense_hdr sshdr;
3659 const struct scsi_exec_args exec_args = {
3661 .req_flags = BLK_MQ_REQ_PM,
3663 struct scsi_device *sdp = sdkp->device;
3667 cmd[4] |= 1; /* START */
3669 if (sdp->start_stop_pwr_cond)
3670 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */
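/*
 * With start_stop_pwr_cond the POWER CONDITION field (byte 4,
 * bits 7:4) is used: per SBC, 1h requests ACTIVE and 3h requests
 * STANDBY, so cmd[4] ends up 0x11 for start and 0x30 for stop.
 */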
3672 if (!scsi_device_online(sdp))
3675 res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT,
3676 sdkp->max_retries, &exec_args);
3678 sd_print_result(sdkp, "Start/Stop Unit failed", res);
3679 if (res > 0 && scsi_sense_valid(&sshdr)) {
3680 sd_print_sense_hdr(sdkp, &sshdr);
3681 /* 0x3a is medium not present */
3682 if (sshdr.asc == 0x3a)
3687 /* SCSI error codes must not go to the generic layer */
3695 * Send a SYNCHRONIZE CACHE instruction down to the device through
3696 * the normal SCSI command structure. Wait for the command to complete.
3699 static void sd_shutdown(struct device *dev)
3701 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3704 return; /* this can happen */
3706 if (pm_runtime_suspended(dev))
3709 if (sdkp->WCE && sdkp->media_present) {
3710 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3711 sd_sync_cache(sdkp, NULL);
3714 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
3715 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
3716 sd_start_stop_device(sdkp, 0);
3720 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3722 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3723 struct scsi_sense_hdr sshdr;
3726 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
3729 if (sdkp->WCE && sdkp->media_present) {
3730 if (!sdkp->device->silence_suspend)
3731 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3732 ret = sd_sync_cache(sdkp, &sshdr);
3735 /* ignore OFFLINE device */
3739 if (!scsi_sense_valid(&sshdr) ||
3740 sshdr.sense_key != ILLEGAL_REQUEST)
3744 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
3745 * doesn't support sync. There's not much to do and
3746 * suspend shouldn't fail.
3752 if (sdkp->device->manage_start_stop) {
3753 if (!sdkp->device->silence_suspend)
3754 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
3755 /* an error is not worth aborting a system sleep */
3756 ret = sd_start_stop_device(sdkp, 0);
3757 if (ignore_stop_errors)
3764 static int sd_suspend_system(struct device *dev)
3766 if (pm_runtime_suspended(dev))
3769 return sd_suspend_common(dev, true);
3772 static int sd_suspend_runtime(struct device *dev)
3774 return sd_suspend_common(dev, false);
3777 static int sd_resume(struct device *dev)
3779 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3782 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
3785 if (!sdkp->device->manage_start_stop)
3788 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
3789 ret = sd_start_stop_device(sdkp, 1);
3791 opal_unlock_from_suspend(sdkp->opal_dev);
3795 static int sd_resume_system(struct device *dev)
3797 if (pm_runtime_suspended(dev))
3800 return sd_resume(dev);
3803 static int sd_resume_runtime(struct device *dev)
3805 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3806 struct scsi_device *sdp;
3808 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
3813 if (sdp->ignore_media_change) {
3814 /* clear the device's sense data */
3815 static const u8 cmd[10] = { REQUEST_SENSE };
3816 const struct scsi_exec_args exec_args = {
3817 .req_flags = BLK_MQ_REQ_PM,
3820 if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
3821 sdp->request_queue->rq_timeout, 1,
3823 sd_printk(KERN_NOTICE, sdkp,
3824 "Failed to clear sense data\n");
3827 return sd_resume(dev);
3831 * init_sd - entry point for this driver (both when built in and when a module).
3834 * Note: this function registers this driver with the scsi mid-level.
3836 static int __init init_sd(void)
3838 int majors = 0, i, err;
3840 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
3842 for (i = 0; i < SD_MAJORS; i++) {
3843 if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
3851 err = class_register(&sd_disk_class);
3855 sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
3856 if (!sd_page_pool) {
3857 printk(KERN_ERR "sd: can't init discard page pool\n");
3862 err = scsi_register_driver(&sd_template.gendrv);
3864 goto err_out_driver;
3869 mempool_destroy(sd_page_pool);
3871 class_unregister(&sd_disk_class);
3873 for (i = 0; i < SD_MAJORS; i++)
3874 unregister_blkdev(sd_major(i), "sd");
3879 * exit_sd - exit point for this driver (when it is a module).
3881 * Note: this function unregisters this driver from the scsi mid-level.
3883 static void __exit exit_sd(void)
3887 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
3889 scsi_unregister_driver(&sd_template.gendrv);
3890 mempool_destroy(sd_page_pool);
3892 class_unregister(&sd_disk_class);
3894 for (i = 0; i < SD_MAJORS; i++)
3895 unregister_blkdev(sd_major(i), "sd");
3898 module_init(init_sd);
3899 module_exit(exit_sd);
3901 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
3903 scsi_print_sense_hdr(sdkp->device,
3904 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
3907 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
3909 const char *hb_string = scsi_hostbyte_string(result);
3912 sd_printk(KERN_INFO, sdkp,
3913 "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
3914 hb_string ? hb_string : "invalid",
3917 sd_printk(KERN_INFO, sdkp,
3918 "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
3919 msg, host_byte(result), "DRIVER_OK");