2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
94 #define DEF_DEV_SIZE_MB 8
98 #define DEF_EVERY_NTH 0
103 #define DEF_LBPWS10 0
105 #define DEF_LOWEST_ALIGNED 0
106 #define DEF_NO_LUN_0 0
107 #define DEF_NUM_PARTS 0
109 #define DEF_OPT_BLKS 64
110 #define DEF_PHYSBLK_EXP 0
112 #define DEF_REMOVABLE false
113 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
114 #define DEF_SECTOR_SIZE 512
115 #define DEF_UNMAP_ALIGNMENT 0
116 #define DEF_UNMAP_GRANULARITY 1
117 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
118 #define DEF_UNMAP_MAX_DESC 256
119 #define DEF_VIRTUAL_GB 0
120 #define DEF_VPD_USE_HOSTNO 1
121 #define DEF_WRITESAME_LENGTH 0xFFFF
123 /* bit mask values for scsi_debug_opts */
124 #define SCSI_DEBUG_OPT_NOISE 1
125 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
126 #define SCSI_DEBUG_OPT_TIMEOUT 4
127 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
128 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
129 #define SCSI_DEBUG_OPT_DIF_ERR 32
130 #define SCSI_DEBUG_OPT_DIX_ERR 64
131 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
132 /* When "every_nth" > 0 then modulo "every_nth" commands:
133 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
134 * - a RECOVERED_ERROR is simulated on successful read and write
135 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
136 * - a TRANSPORT_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
139 * When "every_nth" < 0 then after "- every_nth" commands:
140 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
141 * - a RECOVERED_ERROR is simulated on successful read and write
142 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
143 * - a TRANSPORT_ERROR is simulated on successful read and write
144 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
145 * This will continue until some other action occurs (e.g. the user
146 * writing a new value (other than -1 or 1) to every_nth via sysfs).
149 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
150 * sector on read commands: */
151 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
152 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
154 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
155 * or "peripheral device" addressing (value 0) */
156 #define SAM2_LUN_ADDRESS_METHOD 0
157 #define SAM2_WLUN_REPORT_LUNS 0xc101
159 /* Can queue up to this number of commands. Typically commands
160 * that have a non-zero delay are queued. */
161 #define SCSI_DEBUG_CANQUEUE 255
163 static int scsi_debug_add_host = DEF_NUM_HOST;
164 static int scsi_debug_ato = DEF_ATO;
165 static int scsi_debug_delay = DEF_DELAY;
166 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
167 static int scsi_debug_dif = DEF_DIF;
168 static int scsi_debug_dix = DEF_DIX;
169 static int scsi_debug_dsense = DEF_D_SENSE;
170 static int scsi_debug_every_nth = DEF_EVERY_NTH;
171 static int scsi_debug_fake_rw = DEF_FAKE_RW;
172 static unsigned int scsi_debug_guard = DEF_GUARD;
173 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
174 static int scsi_debug_max_luns = DEF_MAX_LUNS;
175 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
176 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
177 static int scsi_debug_no_uld = 0;
178 static int scsi_debug_num_parts = DEF_NUM_PARTS;
179 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
180 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
181 static int scsi_debug_opts = DEF_OPTS;
182 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
183 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
184 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
185 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
186 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
187 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
188 static unsigned int scsi_debug_lbpu = DEF_LBPU;
189 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
190 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
191 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
192 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
193 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
194 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
195 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
196 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
197 static bool scsi_debug_removable = DEF_REMOVABLE;
199 static int scsi_debug_cmnd_count = 0;
201 #define DEV_READONLY(TGT) (0)
203 static unsigned int sdebug_store_sectors;
204 static sector_t sdebug_capacity; /* in sectors */
206 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
207 may still need them */
208 static int sdebug_heads; /* heads per disk */
209 static int sdebug_cylinders_per; /* cylinders per surface */
210 static int sdebug_sectors_per; /* sectors per cylinder */
212 #define SDEBUG_MAX_PARTS 4
214 #define SDEBUG_SENSE_LEN 32
216 #define SCSI_DEBUG_MAX_CMD_LEN 32
/*
 * scsi_debug_lbp - non-zero when any logical block provisioning mode is
 * enabled via the lbpu / lbpws / lbpws10 module parameters.
 * NOTE(review): enclosing braces are missing from this excerpt.
 */
218 static unsigned int scsi_debug_lbp(void)
220 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
/*
 * Per logical-unit state. One entry per simulated LUN, linked on the
 * owning host's dev_info_list (see struct sdebug_host_info).
 * NOTE(review): several members (target, lun, flags, etc. referenced by
 * code below, e.g. devip->lun, devip->stopped) are missing from this
 * excerpt.
 */
223 struct sdebug_dev_info {
224 struct list_head dev_list;
225 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
226 unsigned int channel;
229 struct sdebug_host_info *sdbg_host; /* back-pointer to owning host */
/*
 * Per simulated host adapter. All instances are linked on the global
 * sdebug_host_list (guarded by sdebug_host_list_lock).
 * NOTE(review): closing brace and some members missing from this excerpt.
 */
236 struct sdebug_host_info {
237 struct list_head host_list;
238 struct Scsi_Host *shost; /* mid-layer host this instance backs */
240 struct list_head dev_info_list; /* list of sdebug_dev_info LUNs */
243 #define to_sdebug_host(d) \
244 container_of(d, struct sdebug_host_info, dev)
246 static LIST_HEAD(sdebug_host_list);
247 static DEFINE_SPINLOCK(sdebug_host_list_lock);
249 typedef void (* done_funct_t) (struct scsi_cmnd *);
/*
 * One slot of queued_arr[]: a command whose completion is being delayed
 * by a timer (see SCSI_DEBUG_CANQUEUE).
 */
251 struct sdebug_queued_cmd {
253 struct timer_list cmnd_timer; /* fires to complete the command */
254 done_funct_t done_funct; /* mid-layer completion callback */
255 struct scsi_cmnd * a_cmnd; /* command awaiting completion */
258 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
260 static unsigned char * fake_storep; /* ramdisk storage */
261 static struct sd_dif_tuple *dif_storep; /* protection info */
262 static void *map_storep; /* provisioning map */
264 static unsigned long map_size;
265 static int num_aborts = 0;
266 static int num_dev_resets = 0;
267 static int num_bus_resets = 0;
268 static int num_host_resets = 0;
269 static int dix_writes;
270 static int dix_reads;
271 static int dif_errors;
273 static DEFINE_SPINLOCK(queued_arr_lock);
274 static DEFINE_RWLOCK(atomic_rw);
276 static char sdebug_proc_name[] = "scsi_debug";
278 static struct bus_type pseudo_lld_bus;
280 static struct device_driver sdebug_driverfs_driver = {
281 .name = sdebug_proc_name,
282 .bus = &pseudo_lld_bus,
285 static const int check_condition_result =
286 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
288 static const int illegal_condition_result =
289 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
291 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
293 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
/*
 * fake_store - address of the ramdisk byte backing @lba.
 * do_div() leaves the quotient in @lba and returns the remainder, so
 * the assignment wraps lba modulo sdebug_store_sectors before indexing
 * fake_storep by whole sectors.
 */
296 static void *fake_store(unsigned long long lba)
298 lba = do_div(lba, sdebug_store_sectors);
300 return fake_storep + lba * scsi_debug_sector_size;
/*
 * dif_store - protection-information tuple for @sector, wrapping the
 * sector modulo sdebug_store_sectors (do_div() returns the remainder).
 */
303 static struct sd_dif_tuple *dif_store(sector_t sector)
305 sector = do_div(sector, sdebug_store_sectors);
307 return dif_storep + sector;
310 static int sdebug_add_adapter(void);
311 static void sdebug_remove_adapter(void);
/*
 * sdebug_max_tgts_luns - push the current scsi_debug_num_tgts value into
 * every registered host's max_id, and set max_lun high enough to expose
 * the well-known REPORT LUNS w-lun. Walks sdebug_host_list under
 * sdebug_host_list_lock.
 * NOTE(review): the else branch's condition line is missing from this
 * excerpt; L231/L232 are the two arms of an if/else on this_id.
 */
313 static void sdebug_max_tgts_luns(void)
315 struct sdebug_host_info *sdbg_host;
316 struct Scsi_Host *hpnt;
318 spin_lock(&sdebug_host_list_lock);
319 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
320 hpnt = sdbg_host->shost;
321 if ((hpnt->this_id >= 0) &&
322 (scsi_debug_num_tgts > hpnt->this_id))
323 hpnt->max_id = scsi_debug_num_tgts + 1; /* skip the initiator id */
325 hpnt->max_id = scsi_debug_num_tgts;
326 /* scsi_debug_max_luns; */
327 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
329 spin_unlock(&sdebug_host_list_lock);
/*
 * mk_sense_buffer - (re)build the per-device sense data for @devip with
 * the given sense key / ASC / ASCQ. Format (fixed vs descriptor) follows
 * the scsi_debug_dsense module parameter. Logs the triple when the
 * NOISE opt bit is set.
 */
332 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
335 unsigned char *sbuff;
337 sbuff = devip->sense_buff;
338 memset(sbuff, 0, SDEBUG_SENSE_LEN);
340 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
342 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
343 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
344 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
/*
 * get_data_transfer_info - decode the starting LBA, transfer length and
 * (for 32-byte CDBs) the expected initial reference tag out of the raw
 * CDB bytes, big-endian, sized per opcode.
 * NOTE(review): the switch statement and most case labels (16/10/6-byte
 * read/write opcodes) are missing from this excerpt; only the
 * VARIABLE_LENGTH_CMD label is visible. Groupings below are inferred
 * from the byte offsets used — confirm against the full source.
 */
347 static void get_data_transfer_info(unsigned char *cmd,
348 unsigned long long *lba, unsigned int *num,
354 case VARIABLE_LENGTH_CMD:
355 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
356 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
357 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
358 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
360 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
361 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
363 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
/* 16-byte CDB: 8-byte LBA at bytes 2-9, 4-byte length at 10-13 */
370 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
371 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
372 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
373 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
375 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
/* 12-byte CDB: 4-byte LBA at bytes 2-5, 4-byte length at 6-9 */
380 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
383 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
/* 10-byte CDB: 4-byte LBA at bytes 2-5, 2-byte length at 7-8 */
390 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
393 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
/* 6-byte CDB: 21-bit LBA; length byte of 0 means 256 blocks */
397 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
398 (u32)(cmd[1] & 0x1f) << 16;
399 *num = (0 == cmd[4]) ? 256 : cmd[4];
/*
 * scsi_debug_ioctl - host-template ioctl hook; only logs the command
 * when the NOISE opt bit is set. Per the comment below, -ENOTTY would
 * be the strictly correct "unsupported" return but it confuses fdisk.
 * NOTE(review): the actual return statement is missing from this
 * excerpt.
 */
406 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
408 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
409 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
412 /* return -ENOTTY; // correct return but upsets fdisk */
/*
 * check_readiness - common entry check for most commands.
 * Returns check_condition_result (with sense data set in @devip) when a
 * pending power-on-reset unit attention must be reported, or when the
 * device is stopped and @reset_only is 0 (NOT READY, initializing
 * command required). Returns 0 when the device is usable.
 * NOTE(review): the condition guarding the POWERON_RESET branch is
 * missing from this excerpt.
 */
415 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
416 struct sdebug_dev_info * devip)
419 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
420 printk(KERN_INFO "scsi_debug: Reporting Unit "
421 "attention: power on reset\n");
423 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
424 return check_condition_result;
426 if ((0 == reset_only) && devip->stopped) {
427 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
428 printk(KERN_INFO "scsi_debug: Reporting Not "
429 "ready: initializing command required\n");
430 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
432 return check_condition_result;
437 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
/*
 * fill_from_dev_buffer - copy up to arr_len bytes of response data from
 * @arr into the command's data-in scatter-gather list and record the
 * residual (requested - actually copied) in sdb->resid. Fails with
 * DID_ERROR unless the command is bidirectional or DMA_FROM_DEVICE.
 */
438 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
442 struct scsi_data_buffer *sdb = scsi_in(scp);
446 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
447 return (DID_ERROR << 16);
449 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
451 sdb->resid = scsi_bufflen(scp) - act_len;
456 /* Returns number of bytes fetched into 'arr' or -1 if error. */
/*
 * fetch_to_dev_buffer - pull up to arr_len bytes of data-out payload
 * from the command's scatter-gather list into @arr. Returns 0 for a
 * zero-length transfer; direction must be bidi or DMA_TO_DEVICE.
 * NOTE(review): the early-return values on L292/L293 are on missing
 * lines (-1 per the header comment above this function).
 */
457 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
460 if (!scsi_bufflen(scp))
462 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
465 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
469 static const char * inq_vendor_id = "Linux ";
470 static const char * inq_product_id = "scsi_debug ";
471 static const char * inq_product_rev = "0004";
/*
 * inquiry_evpd_83 - build the Device Identification VPD page (0x83)
 * designation-descriptor list into @arr: a faked T10 vendor-id
 * descriptor, then (for real LUs, dev_id_num >= 0) NAA-5 logical unit,
 * relative target port, NAA-5 target port, target port group, NAA-5
 * target device, and a SCSI name string descriptor.
 * Returns the number of bytes written (return line missing from this
 * excerpt).
 */
473 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
474 int target_dev_id, int dev_id_num,
475 const char * dev_id_str,
481 port_a = target_dev_id + 1;
482 /* T10 vendor identifier field format (faked) */
483 arr[0] = 0x2; /* ASCII */
486 memcpy(&arr[4], inq_vendor_id, 8);
487 memcpy(&arr[12], inq_product_id, 16);
488 memcpy(&arr[28], dev_id_str, dev_id_str_len);
489 num = 8 + 16 + dev_id_str_len;
492 if (dev_id_num >= 0) {
493 /* NAA-5, Logical unit identifier (binary) */
494 arr[num++] = 0x1; /* binary (not necessarily sas) */
495 arr[num++] = 0x3; /* PIV=0, lu, naa */
498 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
502 arr[num++] = (dev_id_num >> 24);
503 arr[num++] = (dev_id_num >> 16) & 0xff;
504 arr[num++] = (dev_id_num >> 8) & 0xff;
505 arr[num++] = dev_id_num & 0xff;
506 /* Target relative port number */
507 arr[num++] = 0x61; /* proto=sas, binary */
508 arr[num++] = 0x94; /* PIV=1, target port, rel port */
509 arr[num++] = 0x0; /* reserved */
510 arr[num++] = 0x4; /* length */
511 arr[num++] = 0x0; /* reserved */
512 arr[num++] = 0x0; /* reserved */
514 arr[num++] = 0x1; /* relative port A */
516 /* NAA-5, Target port identifier */
517 arr[num++] = 0x61; /* proto=sas, binary */
518 arr[num++] = 0x93; /* piv=1, target port, naa */
521 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
525 arr[num++] = (port_a >> 24);
526 arr[num++] = (port_a >> 16) & 0xff;
527 arr[num++] = (port_a >> 8) & 0xff;
528 arr[num++] = port_a & 0xff;
529 /* NAA-5, Target port group identifier */
530 arr[num++] = 0x61; /* proto=sas, binary */
531 arr[num++] = 0x95; /* piv=1, target port group id */
536 arr[num++] = (port_group_id >> 8) & 0xff;
537 arr[num++] = port_group_id & 0xff;
538 /* NAA-5, Target device identifier */
539 arr[num++] = 0x61; /* proto=sas, binary */
540 arr[num++] = 0xa3; /* piv=1, target device, naa */
543 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
547 arr[num++] = (target_dev_id >> 24);
548 arr[num++] = (target_dev_id >> 16) & 0xff;
549 arr[num++] = (target_dev_id >> 8) & 0xff;
550 arr[num++] = target_dev_id & 0xff;
551 /* SCSI name string: Target device identifier */
552 arr[num++] = 0x63; /* proto=sas, UTF-8 */
553 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
556 memcpy(arr + num, "naa.52222220", 12);
558 snprintf(b, sizeof(b), "%08X", target_dev_id);
559 memcpy(arr + num, b, 8);
561 memset(arr + num, 0, 4); /* pad the name string to a 4-byte multiple */
567 static unsigned char vpd84_data[] = {
568 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
569 0x22,0x22,0x22,0x0,0xbb,0x1,
570 0x22,0x22,0x22,0x0,0xbb,0x2,
/*
 * inquiry_evpd_84 - Software Interface Identification VPD page (0x84):
 * copies the canned vpd84_data table and returns its length.
 */
573 static int inquiry_evpd_84(unsigned char * arr)
575 memcpy(arr, vpd84_data, sizeof(vpd84_data));
576 return sizeof(vpd84_data);
/*
 * inquiry_evpd_85 - Management Network Addresses VPD page (0x85): two
 * descriptors (storage-configuration and logging URLs), each NUL
 * terminated and padded to a 4-byte multiple.
 * NOTE(review): the olen/plen setup lines and the return are missing
 * from this excerpt.
 */
579 static int inquiry_evpd_85(unsigned char * arr)
582 const char * na1 = "https://www.kernel.org/config";
583 const char * na2 = "http://www.kernel.org/log";
586 arr[num++] = 0x1; /* lu, storage config */
587 arr[num++] = 0x0; /* reserved */
592 plen = ((plen / 4) + 1) * 4; /* round up to 4-byte multiple */
593 arr[num++] = plen; /* length, null terminated, padded */
594 memcpy(arr + num, na1, olen);
595 memset(arr + num + olen, 0, plen - olen);
598 arr[num++] = 0x4; /* lu, logging */
599 arr[num++] = 0x0; /* reserved */
604 plen = ((plen / 4) + 1) * 4;
605 arr[num++] = plen; /* length, null terminated, padded */
606 memcpy(arr + num, na2, olen);
607 memset(arr + num + olen, 0, plen - olen);
613 /* SCSI ports VPD page */
/*
 * inquiry_evpd_88 - SCSI Ports VPD page (0x88): advertises two relative
 * ports, each with a 12-byte NAA-5 target-port-identifier descriptor.
 * Port B is a fake secondary port (see the group/port comments in
 * resp_report_tgtpgs). Return statement is on a line missing from this
 * excerpt.
 */
614 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
619 port_a = target_dev_id + 1;
621 arr[num++] = 0x0; /* reserved */
622 arr[num++] = 0x0; /* reserved */
624 arr[num++] = 0x1; /* relative port 1 (primary) */
625 memset(arr + num, 0, 6); /* reserved + initiator port id length */
628 arr[num++] = 12; /* length tp descriptor */
629 /* naa-5 target port identifier (A) */
630 arr[num++] = 0x61; /* proto=sas, binary */
631 arr[num++] = 0x93; /* PIV=1, target port, NAA */
632 arr[num++] = 0x0; /* reserved */
633 arr[num++] = 0x8; /* length */
634 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
638 arr[num++] = (port_a >> 24);
639 arr[num++] = (port_a >> 16) & 0xff;
640 arr[num++] = (port_a >> 8) & 0xff;
641 arr[num++] = port_a & 0xff;
643 arr[num++] = 0x0; /* reserved */
644 arr[num++] = 0x0; /* reserved */
646 arr[num++] = 0x2; /* relative port 2 (secondary) */
647 memset(arr + num, 0, 6); /* reserved + initiator port id length */
650 arr[num++] = 12; /* length tp descriptor */
651 /* naa-5 target port identifier (B) */
652 arr[num++] = 0x61; /* proto=sas, binary */
653 arr[num++] = 0x93; /* PIV=1, target port, NAA */
654 arr[num++] = 0x0; /* reserved */
655 arr[num++] = 0x8; /* length */
656 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
660 arr[num++] = (port_b >> 24);
661 arr[num++] = (port_b >> 16) & 0xff;
662 arr[num++] = (port_b >> 8) & 0xff;
663 arr[num++] = port_b & 0xff;
669 static unsigned char vpd89_data[] = {
670 /* from 4th byte */ 0,0,0,0,
671 'l','i','n','u','x',' ',' ',' ',
672 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
674 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
676 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
677 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
678 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
679 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
681 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
683 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
685 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
686 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
687 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
689 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
690 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
691 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
696 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
697 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
698 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
706 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
707 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
708 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
/*
 * inquiry_evpd_89 - ATA Information VPD page (0x89): copies the canned
 * vpd89_data table (fake SAT identify data) and returns its length.
 */
713 static int inquiry_evpd_89(unsigned char * arr)
715 memcpy(arr, vpd89_data, sizeof(vpd89_data));
716 return sizeof(vpd89_data);
720 /* Block limits VPD page (SBC-3) */
721 static unsigned char vpdb0_data[] = {
722 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
723 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
724 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
725 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
/*
 * inquiry_evpd_b0 - Block Limits VPD page (0xb0, SBC-3). Starts from
 * the canned vpdb0_data template, then overwrites the granularity,
 * maximum/optimal transfer lengths and — when provisioning is enabled —
 * the UNMAP and WRITE SAME limit fields. Returns 0x3c when the LBP
 * fields are present, otherwise the template size.
 * NOTE(review): the closing braces of the two if-blocks are on lines
 * missing from this excerpt.
 */
728 static int inquiry_evpd_b0(unsigned char * arr)
732 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
734 /* Optimal transfer length granularity */
735 gran = 1 << scsi_debug_physblk_exp;
736 arr[2] = (gran >> 8) & 0xff;
737 arr[3] = gran & 0xff;
739 /* Maximum Transfer Length */
740 if (sdebug_store_sectors > 0x400) {
741 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
742 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
743 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
744 arr[7] = sdebug_store_sectors & 0xff;
747 /* Optimal Transfer Length */
748 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
750 if (scsi_debug_lbpu) {
751 /* Maximum Unmap LBA Count */
752 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
754 /* Maximum Unmap Block Descriptor Count */
755 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
758 /* Unmap Granularity Alignment */
759 if (scsi_debug_unmap_alignment) {
760 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
761 arr[28] |= 0x80; /* UGAVALID */
764 /* Optimal Unmap Granularity */
765 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
767 /* Maximum WRITE SAME Length */
768 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
770 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
772 return sizeof(vpdb0_data);
775 /* Block device characteristics VPD page (SBC-3) */
/*
 * inquiry_evpd_b1 - Block Device Characteristics VPD page (0xb1, SBC-3):
 * reports a non-rotating (SSD-like), sub-1.8" form-factor medium.
 * Return statement is on a line missing from this excerpt.
 */
776 static int inquiry_evpd_b1(unsigned char *arr)
778 memset(arr, 0, 0x3c);
780 arr[1] = 1; /* non rotating medium (e.g. solid state) */
782 arr[3] = 5; /* less than 1.8" */
787 /* Logical block provisioning VPD page (SBC-3) */
/*
 * inquiry_evpd_b2 - Logical Block Provisioning VPD page (0xb2, SBC-3).
 * Sets the provisioning flag bits from the lbpws / lbpws10 / lbprz
 * module parameters.
 * NOTE(review): the LBPU branch, the flag assignments themselves and
 * the return are on lines missing from this excerpt.
 */
788 static int inquiry_evpd_b2(unsigned char *arr)
791 arr[0] = 0; /* threshold exponent */
796 if (scsi_debug_lbpws)
799 if (scsi_debug_lbpws10)
802 if (scsi_debug_lbprz)
808 #define SDEBUG_LONG_INQ_SZ 96
809 #define SDEBUG_MAX_INQ_ARR_SZ 584
/*
 * resp_inquiry - handle the INQUIRY command. Dispatches to the
 * inquiry_evpd_*() builders when the EVPD bit is set, rejects CMDDT,
 * and otherwise returns a standard inquiry response built from the
 * module parameters and the fake vendor/product strings. The response
 * buffer is kzalloc'd (GFP_ATOMIC) and truncated to the CDB allocation
 * length on the way out; kfree of arr is on lines missing from this
 * excerpt.
 */
811 static int resp_inquiry(struct scsi_cmnd * scp, int target,
812 struct sdebug_dev_info * devip)
814 unsigned char pq_pdt;
816 unsigned char *cmd = (unsigned char *)scp->cmnd;
817 int alloc_len, n, ret;
819 alloc_len = (cmd[3] << 8) + cmd[4];
820 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
822 return DID_REQUEUE << 16; /* allocation failed: ask mid-layer to retry */
/* peripheral qualifier/device type: wlun, hidden lun 0, or real type */
824 pq_pdt = 0x1e; /* present, wlun */
825 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
826 pq_pdt = 0x7f; /* not present, no device type */
828 pq_pdt = (scsi_debug_ptype & 0x1f);
830 if (0x2 & cmd[1]) { /* CMDDT bit set */
831 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
834 return check_condition_result;
835 } else if (0x1 & cmd[1]) { /* EVPD bit set */
836 int lu_id_num, port_group_id, target_dev_id, len;
838 int host_no = devip->sdbg_host->shost->host_no;
/* synthesize stable fake identifiers from host/target/lun numbers */
840 port_group_id = (((host_no + 1) & 0x7f) << 8) +
841 (devip->channel & 0x7f);
842 if (0 == scsi_debug_vpd_use_hostno)
844 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
845 (devip->target * 1000) + devip->lun);
846 target_dev_id = ((host_no + 1) * 2000) +
847 (devip->target * 1000) - 3;
848 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
849 if (0 == cmd[2]) { /* supported vital product data pages */
850 arr[1] = cmd[2]; /*sanity */
852 arr[n++] = 0x0; /* this page */
853 arr[n++] = 0x80; /* unit serial number */
854 arr[n++] = 0x83; /* device identification */
855 arr[n++] = 0x84; /* software interface ident. */
856 arr[n++] = 0x85; /* management network addresses */
857 arr[n++] = 0x86; /* extended inquiry */
858 arr[n++] = 0x87; /* mode page policy */
859 arr[n++] = 0x88; /* SCSI ports */
860 arr[n++] = 0x89; /* ATA information */
861 arr[n++] = 0xb0; /* Block limits (SBC) */
862 arr[n++] = 0xb1; /* Block characteristics (SBC) */
863 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
865 arr[3] = n - 4; /* number of supported VPD pages */
866 } else if (0x80 == cmd[2]) { /* unit serial number */
867 arr[1] = cmd[2]; /*sanity */
869 memcpy(&arr[4], lu_id_str, len);
870 } else if (0x83 == cmd[2]) { /* device identification */
871 arr[1] = cmd[2]; /*sanity */
872 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
873 target_dev_id, lu_id_num,
875 } else if (0x84 == cmd[2]) { /* Software interface ident. */
876 arr[1] = cmd[2]; /*sanity */
877 arr[3] = inquiry_evpd_84(&arr[4]);
878 } else if (0x85 == cmd[2]) { /* Management network addresses */
879 arr[1] = cmd[2]; /*sanity */
880 arr[3] = inquiry_evpd_85(&arr[4]);
881 } else if (0x86 == cmd[2]) { /* extended inquiry */
882 arr[1] = cmd[2]; /*sanity */
883 arr[3] = 0x3c; /* number of following entries */
884 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
885 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
886 else if (scsi_debug_dif)
887 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
889 arr[4] = 0x0; /* no protection stuff */
890 arr[5] = 0x7; /* head of q, ordered + simple q's */
891 } else if (0x87 == cmd[2]) { /* mode page policy */
892 arr[1] = cmd[2]; /*sanity */
893 arr[3] = 0x8; /* number of following entries */
894 arr[4] = 0x2; /* disconnect-reconnect mp */
895 arr[6] = 0x80; /* mlus, shared */
896 arr[8] = 0x18; /* protocol specific lu */
897 arr[10] = 0x82; /* mlus, per initiator port */
898 } else if (0x88 == cmd[2]) { /* SCSI Ports */
899 arr[1] = cmd[2]; /*sanity */
900 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
901 } else if (0x89 == cmd[2]) { /* ATA information */
902 arr[1] = cmd[2]; /*sanity */
903 n = inquiry_evpd_89(&arr[4]);
906 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
907 arr[1] = cmd[2]; /*sanity */
908 arr[3] = inquiry_evpd_b0(&arr[4]);
909 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
910 arr[1] = cmd[2]; /*sanity */
911 arr[3] = inquiry_evpd_b1(&arr[4]);
912 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
913 arr[1] = cmd[2]; /*sanity */
914 arr[3] = inquiry_evpd_b2(&arr[4]);
916 /* Illegal request, invalid field in cdb */
917 mk_sense_buffer(devip, ILLEGAL_REQUEST,
918 INVALID_FIELD_IN_CDB, 0);
920 return check_condition_result;
/* clamp the VPD response to both alloc_len and the buffer size */
922 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
923 ret = fill_from_dev_buffer(scp, arr,
924 min(len, SDEBUG_MAX_INQ_ARR_SZ));
928 /* drops through here for a standard inquiry */
929 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
930 arr[2] = scsi_debug_scsi_level;
931 arr[3] = 2; /* response_data_format==2 */
932 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
933 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
934 if (0 == scsi_debug_vpd_use_hostno)
935 arr[5] = 0x10; /* claim: implicit TGPS */
936 arr[6] = 0x10; /* claim: MultiP */
937 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
938 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
939 memcpy(&arr[8], inq_vendor_id, 8);
940 memcpy(&arr[16], inq_product_id, 16);
941 memcpy(&arr[32], inq_product_rev, 4);
942 /* version descriptors (2 bytes each) follow */
943 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
944 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
946 if (scsi_debug_ptype == 0) {
947 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
948 } else if (scsi_debug_ptype == 1) {
949 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
951 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
952 ret = fill_from_dev_buffer(scp, arr,
953 min(alloc_len, SDEBUG_LONG_INQ_SZ));
/*
 * resp_requests - handle REQUEST SENSE. If the informational-exceptions
 * mode page has TEST set with MRIE==6, fabricate a THRESHOLD EXCEEDED
 * response (descriptor or fixed format per the DESC bit / dsense
 * parameter); otherwise return the stored sense data, converting fixed
 * format to a minimal descriptor response when DESC is requested.
 * Clears the stored sense afterwards.
 * NOTE(review): several branch/brace lines and the len assignments are
 * missing from this excerpt.
 */
958 static int resp_requests(struct scsi_cmnd * scp,
959 struct sdebug_dev_info * devip)
961 unsigned char * sbuff;
962 unsigned char *cmd = (unsigned char *)scp->cmnd;
963 unsigned char arr[SDEBUG_SENSE_LEN];
967 memset(arr, 0, sizeof(arr));
968 if (devip->reset == 1)
969 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
970 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
971 sbuff = devip->sense_buff;
972 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
/* descriptor-format fabricated response */
975 arr[1] = 0x0; /* NO_SENSE in sense_key */
976 arr[2] = THRESHOLD_EXCEEDED;
977 arr[3] = 0xff; /* TEST set and MRIE==6 */
/* fixed-format fabricated response */
980 arr[2] = 0x0; /* NO_SENSE in sense_key */
981 arr[7] = 0xa; /* 18 byte sense buffer */
982 arr[12] = THRESHOLD_EXCEEDED;
983 arr[13] = 0xff; /* TEST set and MRIE==6 */
986 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
987 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
988 /* DESC bit set and sense_buff in fixed format */
989 memset(arr, 0, sizeof(arr));
991 arr[1] = sbuff[2]; /* sense key */
992 arr[2] = sbuff[12]; /* asc */
993 arr[3] = sbuff[13]; /* ascq */
997 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0); /* clear stored sense */
998 return fill_from_dev_buffer(scp, arr, len);
/*
 * resp_start_stop - handle START STOP UNIT: validates the power
 * condition field, then toggles the device's stopped state to match the
 * START bit. Returns 0 on success; the errsts return and the start-bit
 * extraction are on lines missing from this excerpt.
 */
1001 static int resp_start_stop(struct scsi_cmnd * scp,
1002 struct sdebug_dev_info * devip)
1004 unsigned char *cmd = (unsigned char *)scp->cmnd;
1005 int power_cond, errsts, start;
1007 if ((errsts = check_readiness(scp, 1, devip)))
1009 power_cond = (cmd[4] & 0xf0) >> 4;
/* non-zero power condition is rejected as invalid field */
1011 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1013 return check_condition_result;
1016 if (start == devip->stopped)
1017 devip->stopped = !start;
/*
 * get_sdebug_capacity - advertised capacity in sectors: the virtual_gb
 * parameter (converted at 2^30 bytes per GiB) when positive, otherwise
 * the real backing-store size.
 */
1021 static sector_t get_sdebug_capacity(void)
1023 if (scsi_debug_virtual_gb > 0)
1024 return (sector_t)scsi_debug_virtual_gb *
1025 (1073741824 / scsi_debug_sector_size)
1027 return sdebug_store_sectors;
1030 #define SDEBUG_READCAP_ARR_SZ 8
/*
 * resp_readcap - handle READ CAPACITY(10): last-LBA (capped at
 * 0xffffffff for larger devices — the else arm is on missing lines) and
 * block length, big-endian. Capacity is refreshed first in case
 * virtual_gb changed.
 */
1031 static int resp_readcap(struct scsi_cmnd * scp,
1032 struct sdebug_dev_info * devip)
1034 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1038 if ((errsts = check_readiness(scp, 1, devip)))
1040 /* following just in case virtual_gb changed */
1041 sdebug_capacity = get_sdebug_capacity();
1042 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1043 if (sdebug_capacity < 0xffffffff) {
1044 capac = (unsigned int)sdebug_capacity - 1; /* last LBA, not count */
1045 arr[0] = (capac >> 24);
1046 arr[1] = (capac >> 16) & 0xff;
1047 arr[2] = (capac >> 8) & 0xff;
1048 arr[3] = capac & 0xff;
1055 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1056 arr[7] = scsi_debug_sector_size & 0xff;
1057 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1060 #define SDEBUG_READCAP16_ARR_SZ 32
/*
 * resp_readcap16 - handle READ CAPACITY(16): 8-byte last LBA, 4-byte
 * block length, physical-block exponent, lowest aligned LBA, LBPME/LBPRZ
 * provisioning flags, and protection type/enable when DIF is configured.
 * All multi-byte fields are big-endian. Capacity is refreshed first in
 * case virtual_gb changed.
 */
1061 static int resp_readcap16(struct scsi_cmnd * scp,
1062 struct sdebug_dev_info * devip)
1064 unsigned char *cmd = (unsigned char *)scp->cmnd;
1065 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1066 unsigned long long capac;
1067 int errsts, k, alloc_len;
1069 if ((errsts = check_readiness(scp, 1, devip)))
1071 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1073 /* following just in case virtual_gb changed */
1074 sdebug_capacity = get_sdebug_capacity();
1075 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1076 capac = sdebug_capacity - 1; /* last LBA, not sector count */
1077 for (k = 0; k < 8; ++k, capac >>= 8)
1078 arr[7 - k] = capac & 0xff; /* big-endian 8-byte LBA */
1079 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1080 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1081 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1082 arr[11] = scsi_debug_sector_size & 0xff;
1083 arr[13] = scsi_debug_physblk_exp & 0xf;
1084 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1086 if (scsi_debug_lbp()) {
1087 arr[14] |= 0x80; /* LBPME */
1088 if (scsi_debug_lbprz)
1089 arr[14] |= 0x40; /* LBPRZ */
1092 arr[15] = scsi_debug_lowest_aligned & 0xff;
1094 if (scsi_debug_dif) {
1095 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1096 arr[12] |= 1; /* PROT_EN */
1099 return fill_from_dev_buffer(scp, arr,
1100 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1103 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/*
 * Respond to REPORT TARGET PORT GROUPS: fabricate two port groups (one
 * port each) keyed off host_no/channel; group B is reported unavailable.
 * Returns DID_REQUEUE on allocation failure.
 */
1105 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1106 struct sdebug_dev_info * devip)
1108 unsigned char *cmd = (unsigned char *)scp->cmnd;
1109 unsigned char * arr;
1110 int host_no = devip->sdbg_host->shost->host_no;
1111 int n, ret, alen, rlen;
1112 int port_group_a, port_group_b, port_a, port_b;
/* allocation length from CDB bytes 6-9, big-endian */
1114 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1117 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1119 return DID_REQUEUE << 16;
1121 * EVPD page 0x88 states we have two ports, one
1122 * real and a fake port with no device connected.
1123 * So we create two port groups with one port each
1124 * and set the group with port B to unavailable.
1126 port_a = 0x1; /* relative port A */
1127 port_b = 0x2; /* relative port B */
1128 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1129 (devip->channel & 0x7f);
1130 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1131 (devip->channel & 0x7f) + 0x80;
1134 * The asymmetric access state is cycled according to the host_id.
1137 if (0 == scsi_debug_vpd_use_hostno) {
1138 arr[n++] = host_no % 3; /* Asymm access state */
1139 arr[n++] = 0x0F; /* claim: all states are supported */
1141 arr[n++] = 0x0; /* Active/Optimized path */
1142 arr[n++] = 0x01; /* claim: only support active/optimized paths */
/* target port group A descriptor */
1144 arr[n++] = (port_group_a >> 8) & 0xff;
1145 arr[n++] = port_group_a & 0xff;
1146 arr[n++] = 0; /* Reserved */
1147 arr[n++] = 0; /* Status code */
1148 arr[n++] = 0; /* Vendor unique */
1149 arr[n++] = 0x1; /* One port per group */
1150 arr[n++] = 0; /* Reserved */
1151 arr[n++] = 0; /* Reserved */
1152 arr[n++] = (port_a >> 8) & 0xff;
1153 arr[n++] = port_a & 0xff;
/* target port group B descriptor (unavailable) */
1154 arr[n++] = 3; /* Port unavailable */
1155 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1156 arr[n++] = (port_group_b >> 8) & 0xff;
1157 arr[n++] = port_group_b & 0xff;
1158 arr[n++] = 0; /* Reserved */
1159 arr[n++] = 0; /* Status code */
1160 arr[n++] = 0; /* Vendor unique */
1161 arr[n++] = 0x1; /* One port per group */
1162 arr[n++] = 0; /* Reserved */
1163 arr[n++] = 0; /* Reserved */
1164 arr[n++] = (port_b >> 8) & 0xff;
1165 arr[n++] = port_b & 0xff;
/* bytes 0-3: RETURN DATA LENGTH */
1168 arr[0] = (rlen >> 24) & 0xff;
1169 arr[1] = (rlen >> 16) & 0xff;
1170 arr[2] = (rlen >> 8) & 0xff;
1171 arr[3] = rlen & 0xff;
1174 * Return the smallest value of either
1175 * - The allocated length
1176 * - The constructed command length
1177 * - The maximum array size
1180 ret = fill_from_dev_buffer(scp, arr,
1181 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1186 /* <<Following mode page info copied from ST318451LW>> */
/*
 * Build the Read-Write Error Recovery mode page (0x01); returns its
 * length. The memset zeroes the body past the header (presumably for the
 * changeable-values pcontrol case — the gating line is not visible here).
 */
1188 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1189 { /* Read-Write Error Recovery page for mode_sense */
1190 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1193 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1195 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1196 return sizeof(err_recov_pg);
/*
 * Build the Disconnect-Reconnect mode page (0x02); returns its length.
 */
1199 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1200 { /* Disconnect-Reconnect page for mode_sense */
1201 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1202 0, 0, 0, 0, 0, 0, 0, 0};
1204 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1206 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1207 return sizeof(disconnect_pg);
/*
 * Build the Format Device mode page (0x03): sectors per track and block
 * size come from the module's simulated geometry; bit 0x20 in byte 20
 * flags a removable medium (kept in sync with INQUIRY).
 */
1210 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1211 { /* Format device page for mode_sense */
1212 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1213 0, 0, 0, 0, 0, 0, 0, 0,
1214 0, 0, 0, 0, 0x40, 0, 0, 0};
1216 memcpy(p, format_pg, sizeof(format_pg));
1217 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1218 p[11] = sdebug_sectors_per & 0xff;
1219 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1220 p[13] = scsi_debug_sector_size & 0xff;
1221 if (scsi_debug_removable)
1222 p[20] |= 0x20; /* should agree with INQUIRY */
1224 memset(p + 2, 0, sizeof(format_pg) - 2);
1225 return sizeof(format_pg);
/*
 * Build the Caching mode page (0x08); returns its length.
 */
1228 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1229 { /* Caching page for mode_sense */
1230 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1231 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1233 memcpy(p, caching_pg, sizeof(caching_pg));
1235 memset(p + 2, 0, sizeof(caching_pg) - 2);
1236 return sizeof(caching_pg);
/*
 * Build the Control mode page (0x0a). The D_SENSE bit (0x04 in byte 2)
 * mirrors the module's scsi_debug_dsense setting; ATO (byte 5, 0x80) is
 * set under a condition elided from this extract (presumably DIF/ATO
 * support). pcontrol selects current (default), changeable (1) or
 * default (2) values via ch_/d_ variants.
 */
1239 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1240 { /* Control mode page for mode_sense */
1241 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1243 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1246 if (scsi_debug_dsense)
1247 ctrl_m_pg[2] |= 0x4;
1249 ctrl_m_pg[2] &= ~0x4;
1252 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1254 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1256 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1257 else if (2 == pcontrol)
1258 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1259 return sizeof(ctrl_m_pg);
/*
 * Build the Informational Exceptions Control mode page (0x1c); pcontrol
 * selects changeable (1) or default (2) values via the ch_/d_ variants.
 */
1263 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1264 { /* Informational Exceptions control mode page for mode_sense */
1265 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1267 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1270 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1272 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1273 else if (2 == pcontrol)
1274 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1275 return sizeof(iec_m_pg);
/*
 * Build the SAS SSP protocol-specific mode page (0x19), short format.
 */
1278 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1279 { /* SAS SSP mode page - short format for mode_sense */
1280 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1281 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1283 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1285 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1286 return sizeof(sas_sf_m_pg);
/*
 * Build the SAS Phy Control And Discover mode subpage (0x19/0x01) with
 * two phy descriptors; the attached SAS addresses for ports A and B are
 * derived from target_dev_id (A = id+1, B = A+1) and patched into the
 * template at offsets 20 and 48+20 (48 bytes per phy descriptor).
 */
1290 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1292 { /* SAS phy control and discover mode page for mode_sense */
1293 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1294 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1295 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1296 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1297 0x2, 0, 0, 0, 0, 0, 0, 0,
1298 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1299 0, 0, 0, 0, 0, 0, 0, 0,
1300 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1301 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1302 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1303 0x3, 0, 0, 0, 0, 0, 0, 0,
1304 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1305 0, 0, 0, 0, 0, 0, 0, 0,
1309 port_a = target_dev_id + 1;
1310 port_b = port_a + 1;
1311 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1312 p[20] = (port_a >> 24);
1313 p[21] = (port_a >> 16) & 0xff;
1314 p[22] = (port_a >> 8) & 0xff;
1315 p[23] = port_a & 0xff;
1316 p[48 + 20] = (port_b >> 24);
1317 p[48 + 21] = (port_b >> 16) & 0xff;
1318 p[48 + 22] = (port_b >> 8) & 0xff;
1319 p[48 + 23] = port_b & 0xff;
1321 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1322 return sizeof(sas_pcd_m_pg);
/*
 * Build the SAS SSP shared-port mode subpage (0x19/0x02); returns length.
 */
1325 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1326 { /* SAS SSP shared protocol specific port mode subpage */
1327 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1328 0, 0, 0, 0, 0, 0, 0, 0,
1331 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1333 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1334 return sizeof(sas_sha_m_pg);
1337 #define SDEBUG_MAX_MSENSE_SZ 256
/*
 * Respond to MODE SENSE (6) and MODE SENSE (10): build a mode parameter
 * header, an optional block descriptor (8 bytes, or 16 with LLBAA on the
 * 10-byte CDB), then the requested mode page(s). Rejects pcontrol==3
 * (saved values), unsupported subpages and unknown page codes with
 * CHECK CONDITION / ILLEGAL REQUEST.
 */
1339 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1340 struct sdebug_dev_info * devip)
1342 unsigned char dbd, llbaa;
1343 int pcontrol, pcode, subpcode, bd_len;
1344 unsigned char dev_spec;
1345 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1347 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1348 unsigned char *cmd = (unsigned char *)scp->cmnd;
1350 if ((errsts = check_readiness(scp, 1, devip)))
/* decode CDB fields */
1352 dbd = !!(cmd[1] & 0x8);
1353 pcontrol = (cmd[2] & 0xc0) >> 6;
1354 pcode = cmd[2] & 0x3f;
1356 msense_6 = (MODE_SENSE == cmd[0]);
1357 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
/* block descriptor only for disk-type devices when DBD is clear */
1358 if ((0 == scsi_debug_ptype) && (0 == dbd))
1359 bd_len = llbaa ? 16 : 8;
1362 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1363 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1364 if (0x3 == pcontrol) { /* Saving values not supported */
1365 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1367 return check_condition_result;
/* synthesize a per-target device id used by the SAS subpages */
1369 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1370 (devip->target * 1000) - 3;
1371 /* set DPOFUA bit for disks */
1372 if (0 == scsi_debug_ptype)
1373 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1383 arr[4] = 0x1; /* set LONGLBA bit */
1384 arr[7] = bd_len; /* assume 255 or less */
1388 if ((bd_len > 0) && (!sdebug_capacity))
1389 sdebug_capacity = get_sdebug_capacity();
/* 8-byte block descriptor: 32-bit block count then block length */
1392 if (sdebug_capacity > 0xfffffffe) {
1398 ap[0] = (sdebug_capacity >> 24) & 0xff;
1399 ap[1] = (sdebug_capacity >> 16) & 0xff;
1400 ap[2] = (sdebug_capacity >> 8) & 0xff;
1401 ap[3] = sdebug_capacity & 0xff;
1403 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1404 ap[7] = scsi_debug_sector_size & 0xff;
/* 16-byte (LONGLBA) block descriptor: 64-bit count, 32-bit length */
1407 } else if (16 == bd_len) {
1408 unsigned long long capac = sdebug_capacity;
1410 for (k = 0; k < 8; ++k, capac >>= 8)
1411 ap[7 - k] = capac & 0xff;
1412 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1413 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1414 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1415 ap[15] = scsi_debug_sector_size & 0xff;
/* only page 0x19 supports non-zero subpages here */
1420 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1421 /* TODO: Control Extension page */
1422 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1424 return check_condition_result;
1427 case 0x1: /* Read-Write error recovery page, direct access */
1428 len = resp_err_recov_pg(ap, pcontrol, target);
1431 case 0x2: /* Disconnect-Reconnect page, all devices */
1432 len = resp_disconnect_pg(ap, pcontrol, target);
1435 case 0x3: /* Format device page, direct access */
1436 len = resp_format_pg(ap, pcontrol, target);
1439 case 0x8: /* Caching page, direct access */
1440 len = resp_caching_pg(ap, pcontrol, target);
1443 case 0xa: /* Control Mode page, all devices */
1444 len = resp_ctrl_m_pg(ap, pcontrol, target);
1447 case 0x19: /* if spc==1 then sas phy, control+discover */
1448 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1449 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1450 INVALID_FIELD_IN_CDB, 0);
1451 return check_condition_result;
1454 if ((0x0 == subpcode) || (0xff == subpcode))
1455 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1456 if ((0x1 == subpcode) || (0xff == subpcode))
1457 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1459 if ((0x2 == subpcode) || (0xff == subpcode))
1460 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1463 case 0x1c: /* Informational Exceptions Mode page, all devices */
1464 len = resp_iec_m_pg(ap, pcontrol, target);
1467 case 0x3f: /* Read all Mode pages */
1468 if ((0 == subpcode) || (0xff == subpcode)) {
1469 len = resp_err_recov_pg(ap, pcontrol, target);
1470 len += resp_disconnect_pg(ap + len, pcontrol, target);
1471 len += resp_format_pg(ap + len, pcontrol, target);
1472 len += resp_caching_pg(ap + len, pcontrol, target);
1473 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1474 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1475 if (0xff == subpcode) {
1476 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1477 target, target_dev_id);
1478 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1480 len += resp_iec_m_pg(ap + len, pcontrol, target);
1482 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1483 INVALID_FIELD_IN_CDB, 0);
1484 return check_condition_result;
1489 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1491 return check_condition_result;
/* mode data length: 1 byte for msense_6, 2 bytes otherwise */
1494 arr[0] = offset - 1;
1496 arr[0] = ((offset - 2) >> 8) & 0xff;
1497 arr[1] = (offset - 2) & 0xff;
1499 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1502 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * Respond to MODE SELECT (6/10): fetch the parameter list from the data-out
 * buffer, validate header/block-descriptor/page lengths, and accept writes
 * only to the Control (0x0a) and Informational Exceptions (0x1c) pages,
 * updating the module's cached copies (and scsi_debug_dsense from the
 * D_SENSE bit). Everything else fails with ILLEGAL REQUEST.
 */
1504 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1505 struct sdebug_dev_info * devip)
1507 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1508 int param_len, res, errsts, mpage;
1509 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1510 unsigned char *cmd = (unsigned char *)scp->cmnd;
1512 if ((errsts = check_readiness(scp, 1, devip)))
1514 memset(arr, 0, sizeof(arr));
1517 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
/* require PF=1, SP=0, and a parameter list that fits our buffer */
1518 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1519 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1520 INVALID_FIELD_IN_CDB, 0);
1521 return check_condition_result;
1523 res = fetch_to_dev_buffer(scp, arr, param_len);
1525 return (DID_ERROR << 16);
1526 else if ((res < param_len) &&
1527 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1528 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1529 " IO sent=%d bytes\n", param_len, res);
1530 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1531 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1533 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1534 INVALID_FIELD_IN_PARAM_LIST, 0);
1535 return check_condition_result;
/* off = start of the first mode page (header + block descriptors) */
1537 off = bd_len + (mselect6 ? 4 : 8);
1538 mpage = arr[off] & 0x3f;
1539 ps = !!(arr[off] & 0x80);
1541 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1542 INVALID_FIELD_IN_PARAM_LIST, 0);
1543 return check_condition_result;
/* SPF bit selects the long (subpage) page header format */
1545 spf = !!(arr[off] & 0x40);
1546 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1548 if ((pg_len + off) > param_len) {
1549 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1550 PARAMETER_LIST_LENGTH_ERR, 0);
1551 return check_condition_result;
1554 case 0xa: /* Control Mode page */
1555 if (ctrl_m_pg[1] == arr[off + 1]) {
1556 memcpy(ctrl_m_pg + 2, arr + off + 2,
1557 sizeof(ctrl_m_pg) - 2);
1558 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1562 case 0x1c: /* Informational Exceptions Mode page */
1563 if (iec_m_pg[1] == arr[off + 1]) {
1564 memcpy(iec_m_pg + 2, arr + off + 2,
1565 sizeof(iec_m_pg) - 2);
1572 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1573 INVALID_FIELD_IN_PARAM_LIST, 0);
1574 return check_condition_result;
/*
 * Build the Temperature log page (0x0d) parameter data; returns length.
 */
1577 static int resp_temp_l_pg(unsigned char * arr)
1579 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1580 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1583 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1584 return sizeof(temp_l_pg);
/*
 * Build the Informational Exceptions log page (0x2f) parameter data;
 * reports THRESHOLD_EXCEEDED when the TEST bit is set in the cached
 * IEC mode page.
 */
1587 static int resp_ie_l_pg(unsigned char * arr)
1589 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1592 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1593 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1594 arr[4] = THRESHOLD_EXCEEDED;
1597 return sizeof(ie_l_pg);
1600 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * Respond to LOG SENSE: supports the Supported-Pages (0x00), Temperature
 * (0x0d) and Informational Exceptions (0x2f) pages, plus their subpage
 * listings when subpage 0xff is requested. Anything else fails with
 * ILLEGAL REQUEST.
 */
1602 static int resp_log_sense(struct scsi_cmnd * scp,
1603 struct sdebug_dev_info * devip)
1605 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1606 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1607 unsigned char *cmd = (unsigned char *)scp->cmnd;
1609 if ((errsts = check_readiness(scp, 1, devip)))
1611 memset(arr, 0, sizeof(arr));
1615 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1616 INVALID_FIELD_IN_CDB, 0);
1617 return check_condition_result;
1619 pcontrol = (cmd[2] & 0xc0) >> 6;
1620 pcode = cmd[2] & 0x3f;
1621 subpcode = cmd[3] & 0xff;
1622 alloc_len = (cmd[7] << 8) + cmd[8];
1624 if (0 == subpcode) {
1626 case 0x0: /* Supported log pages log page */
1628 arr[n++] = 0x0; /* this page */
1629 arr[n++] = 0xd; /* Temperature */
1630 arr[n++] = 0x2f; /* Informational exceptions */
1633 case 0xd: /* Temperature log page */
1634 arr[3] = resp_temp_l_pg(arr + 4);
1636 case 0x2f: /* Informational exceptions log page */
1637 arr[3] = resp_ie_l_pg(arr + 4);
1640 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1641 INVALID_FIELD_IN_CDB, 0);
1642 return check_condition_result;
1644 } else if (0xff == subpcode) {
1648 case 0x0: /* Supported log pages and subpages log page */
1651 arr[n++] = 0x0; /* 0,0 page */
1653 arr[n++] = 0xff; /* this page */
1655 arr[n++] = 0x0; /* Temperature */
1657 arr[n++] = 0x0; /* Informational exceptions */
1660 case 0xd: /* Temperature subpages */
1663 arr[n++] = 0x0; /* Temperature */
1666 case 0x2f: /* Informational exceptions subpages */
1669 arr[n++] = 0x0; /* Informational exceptions */
1673 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1674 INVALID_FIELD_IN_CDB, 0);
1675 return check_condition_result;
1678 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1679 INVALID_FIELD_IN_CDB, 0);
1680 return check_condition_result;
1682 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
/* NOTE(review): clamping with SDEBUG_MAX_INQ_ARR_SZ looks like a
 * copy/paste slip — SDEBUG_MAX_LSENSE_SZ is presumably intended here. */
1683 return fill_from_dev_buffer(scp, arr,
1684 min(len, SDEBUG_MAX_INQ_ARR_SZ));
/*
 * Validate an LBA/length pair against the simulated capacity and the
 * backing-store size; builds sense data and returns
 * check_condition_result on violation.
 */
1687 static int check_device_access_params(struct sdebug_dev_info *devi,
1688 unsigned long long lba, unsigned int num)
1690 if (lba + num > sdebug_capacity) {
1691 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1692 return check_condition_result;
1694 /* transfer length excessive (tie in to block limits VPD page) */
1695 if (num > sdebug_store_sectors) {
1696 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1697 return check_condition_result;
1702 /* Returns number of bytes copied or -1 if error. */
/*
 * Copy data between the command's scatter-gather list and the shared
 * fake_storep RAM store. The LBA is reduced modulo sdebug_store_sectors
 * (all simulated disks share one store), and a transfer that runs past
 * the end of the store wraps around to its start ("rest" handling).
 */
1703 static int do_device_access(struct scsi_cmnd *scmd,
1704 struct sdebug_dev_info *devi,
1705 unsigned long long lba, unsigned int num, int write)
1708 unsigned long long block, rest = 0;
1709 struct scsi_data_buffer *sdb;
1710 enum dma_data_direction dir;
1711 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
/* write: drain the data-out buffer into the store */
1715 sdb = scsi_out(scmd);
1716 dir = DMA_TO_DEVICE;
1717 func = sg_pcopy_to_buffer;
/* read: fill the data-in buffer from the store */
1719 sdb = scsi_in(scmd);
1720 dir = DMA_FROM_DEVICE;
1721 func = sg_pcopy_from_buffer;
1726 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1729 block = do_div(lba, sdebug_store_sectors);
1730 if (block + num > sdebug_store_sectors)
1731 rest = block + num - sdebug_store_sectors;
1733 ret = func(sdb->table.sgl, sdb->table.nents,
1734 fake_storep + (block * scsi_debug_sector_size),
1735 (num - rest) * scsi_debug_sector_size, 0);
1736 if (ret != (num - rest) * scsi_debug_sector_size)
/* wrapped portion starts again at the beginning of the store */
1740 ret += func(sdb->table.sgl, sdb->table.nents,
1741 fake_storep, rest * scsi_debug_sector_size,
1742 (num - rest) * scsi_debug_sector_size);
/*
 * Compute the DIF guard tag for a data block: IP checksum when
 * scsi_debug_guard is set, otherwise the T10 CRC.
 */
1748 static __be16 dif_compute_csum(const void *buf, int len)
1752 if (scsi_debug_guard)
1753 csum = (__force __be16)ip_compute_csum(buf, len);
1755 csum = cpu_to_be16(crc_t10dif(buf, len));
/*
 * Verify one DIF tuple against a data block: checks the guard tag, then
 * the reference tag (against the sector for Type 1, against ei_lba for
 * Type 2). Logs and returns a non-success on mismatch (exact return
 * values are on lines elided from this extract).
 */
1760 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1761 sector_t sector, u32 ei_lba)
1763 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1765 if (sdt->guard_tag != csum) {
1766 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1768 (unsigned long)sector,
1769 be16_to_cpu(sdt->guard_tag),
1773 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1774 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1775 pr_err("%s: REF check failed on sector %lu\n",
1776 __func__, (unsigned long)sector);
1779 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1780 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1781 pr_err("%s: REF check failed on sector %lu\n",
1782 __func__, (unsigned long)sector);
/*
 * Copy protection (DIF) data between the command's protection
 * scatter-gather list and the dif_storep array, starting at @sector for
 * @sectors blocks; direction is chosen by @read. Like the data store,
 * dif_storep wraps, so a run past its end continues at the start.
 * Uses kmap_atomic, so no sleeping between map and unmap.
 */
1788 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1789 unsigned int sectors, bool read)
1791 unsigned int i, resid;
1792 struct scatterlist *psgl;
1794 const void *dif_store_end = dif_storep + sdebug_store_sectors;
1796 /* Bytes of protection data to copy into sgl */
1797 resid = sectors * sizeof(*dif_storep);
1799 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1800 int len = min(psgl->length, resid);
1801 void *start = dif_store(sector);
1804 if (dif_store_end < start + len)
1805 rest = start + len - dif_store_end;
1807 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
/* main span: read copies store->sgl, write copies sgl->store */
1810 memcpy(paddr, start, len - rest);
1812 memcpy(start, paddr, len - rest);
/* wrapped remainder starts again at the base of dif_storep */
1816 memcpy(paddr + len - rest, dif_storep, rest);
1818 memcpy(dif_storep, paddr + len - rest, rest);
1821 sector += len / sizeof(*dif_storep);
1823 kunmap_atomic(paddr);
/*
 * Verify stored DIF tuples for a read of @sectors blocks from
 * @start_sec, skipping tuples whose app_tag is 0xffff (unwritten), then
 * copy the protection data out to the command's protection SGL.
 */
1827 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1828 unsigned int sectors, u32 ei_lba)
1831 struct sd_dif_tuple *sdt;
1834 for (i = 0; i < sectors; i++, ei_lba++) {
1837 sector = start_sec + i;
1838 sdt = dif_store(sector);
/* 0xffff app tag marks an uninitialized tuple: nothing to check */
1840 if (sdt->app_tag == cpu_to_be16(0xffff))
1843 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
1850 dif_copy_prot(SCpnt, start_sec, sectors, true);
/*
 * Respond to a READ: range-check, optionally inject an unrecoverable
 * MEDIUM ERROR (when SCSI_DEBUG_OPT_MEDIUM_ERR is set and the range
 * overlaps the configured bad-block window), run DIF verification if
 * protection information is attached, then copy data out of the fake
 * store under the atomic_rw read lock.
 */
1856 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1857 unsigned int num, struct sdebug_dev_info *devip,
1860 unsigned long iflags;
1863 ret = check_device_access_params(devip, lba, num);
1867 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1868 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1869 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1870 /* claim unrecoverable read error */
1871 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1872 /* set info field and valid bit for fixed descriptor */
1873 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1874 devip->sense_buff[0] |= 0x80; /* Valid bit */
1875 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1876 ? OPT_MEDIUM_ERR_ADDR : (int)lba
1877 devip->sense_buff[3] = (ret >> 24) & 0xff;
1878 devip->sense_buff[4] = (ret >> 16) & 0xff;
1879 devip->sense_buff[5] = (ret >> 8) & 0xff;
1880 devip->sense_buff[6] = ret & 0xff;
1882 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1883 return check_condition_result;
/* DIX: verify protection info before handing data to the midlayer */
1887 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1888 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1891 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1892 return illegal_condition_result;
1896 read_lock_irqsave(&atomic_rw, iflags);
1897 ret = do_device_access(SCpnt, devip, lba, num, 0);
1898 read_unlock_irqrestore(&atomic_rw, iflags);
1900 return DID_ERROR << 16;
1902 scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
/*
 * Debug helper: hex/ASCII dump of a buffer to the kernel log, 16 bytes
 * per row (printable characters shown literally, others as hex).
 */
1907 void dump_sector(unsigned char *buf, int len)
1911 printk(KERN_ERR ">>> Sector Dump <<<\n");
1913 for (i = 0 ; i < len ; i += 16) {
1914 printk(KERN_ERR "%04d: ", i);
1916 for (j = 0 ; j < 16 ; j++) {
1917 unsigned char c = buf[i+j];
1918 if (c >= 0x20 && c < 0x7e)
1919 printk(" %c ", buf[i+j]);
1921 printk("%02x ", buf[i+j]);
/*
 * Verify incoming DIF tuples for a WRITE: walk the data SGL in
 * sector-sized chunks in lockstep with the protection SGL, check each
 * tuple with dif_verify() (dumping the offending sector on failure),
 * then commit the protection data into dif_storep. Pages are mapped
 * with kmap_atomic and unmapped on both success and error paths.
 */
1928 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1929 unsigned int sectors, u32 ei_lba)
1932 struct sd_dif_tuple *sdt;
1933 struct scatterlist *dsgl;
1934 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1935 void *daddr, *paddr;
1936 sector_t sector = start_sec;
1939 BUG_ON(scsi_sg_count(SCpnt) == 0);
1940 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1944 /* For each data page */
1945 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1946 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1947 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1949 /* For each sector-sized chunk in data page */
1950 for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) {
1952 /* If we're at the end of the current
1953 * protection page advance to the next one
1955 if (ppage_offset >= psgl->length) {
1956 kunmap_atomic(paddr);
1957 psgl = sg_next(psgl);
1958 BUG_ON(psgl == NULL);
1959 paddr = kmap_atomic(sg_page(psgl))
1964 sdt = paddr + ppage_offset;
1966 ret = dif_verify(sdt, daddr + j, sector, ei_lba);
1968 dump_sector(daddr + j, scsi_debug_sector_size);
1974 ppage_offset += sizeof(struct sd_dif_tuple);
1977 kunmap_atomic(paddr);
1978 kunmap_atomic(daddr);
1981 dif_copy_prot(SCpnt, start_sec, sectors, false);
/* error path: drop the atomic mappings before returning */
1988 kunmap_atomic(paddr);
1989 kunmap_atomic(daddr);
/*
 * Convert an LBA to its provisioning-bitmap index: shift by the unmap
 * alignment offset, then divide by the unmap granularity.
 */
1993 static unsigned long lba_to_map_index(sector_t lba)
1995 if (scsi_debug_unmap_alignment) {
1996 lba += scsi_debug_unmap_granularity -
1997 scsi_debug_unmap_alignment;
1999 do_div(lba, scsi_debug_unmap_granularity);
/*
 * Inverse of lba_to_map_index(): first LBA covered by a bitmap index.
 */
2004 static sector_t map_index_to_lba(unsigned long index)
2006 sector_t lba = index * scsi_debug_unmap_granularity;
2008 if (scsi_debug_unmap_alignment) {
2009 lba -= scsi_debug_unmap_granularity -
2010 scsi_debug_unmap_alignment;
/*
 * Report whether @lba is mapped in the provisioning bitmap and, via
 * @num, how many following blocks share that state (run length, clipped
 * to the end of the store).
 */
2016 static unsigned int map_state(sector_t lba, unsigned int *num)
2019 unsigned int mapped;
2020 unsigned long index;
2023 index = lba_to_map_index(lba);
2024 mapped = test_bit(index, map_storep);
/* find where the current mapped/unmapped run ends */
2027 next = find_next_zero_bit(map_storep, map_size, index);
2029 next = find_next_bit(map_storep, map_size, index);
2031 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/*
 * Mark every provisioning-bitmap block touched by [lba, lba+len) as
 * mapped (allocated).
 */
2039 static void map_region(sector_t lba, unsigned int len)
2041 sector_t end = lba + len;
2042 unsigned long index = lba_to_map_index(lba);
2044 if (index < map_size)
2045 set_bit(index, map_storep);
2047 lba = map_index_to_lba(index + 1);
/*
 * Mark blocks in [lba, lba+len) as unmapped, but only for granules
 * wholly contained in the range. When LBPRZ is enabled the backing data
 * is zeroed and the corresponding DIF tuples are reset to 0xff
 * (uninitialized).
 */
2053 static void unmap_region(sector_t lba, unsigned int len)
2055 sector_t end = lba + len;
2056 unsigned long index = lba_to_map_index(lba);
/* only clear a granule that is aligned and fully inside the range */
2058 if (lba == map_index_to_lba(index) &&
2059 lba + scsi_debug_unmap_granularity <= end &&
2061 clear_bit(index, map_storep);
2062 if (scsi_debug_lbprz) {
2063 memset(fake_storep +
2064 lba * scsi_debug_sector_size, 0,
2065 scsi_debug_sector_size *
2066 scsi_debug_unmap_granularity);
2069 memset(dif_storep + lba, 0xff,
2070 sizeof(*dif_storep) *
2071 scsi_debug_unmap_granularity);
2074 lba = map_index_to_lba(index + 1);
/*
 * Respond to a WRITE: range-check, verify incoming DIF protection data
 * when present, then copy from the data-out buffer into the fake store
 * under the atomic_rw write lock, marking the region mapped for thin
 * provisioning.
 */
2078 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2079 unsigned int num, struct sdebug_dev_info *devip,
2082 unsigned long iflags;
2085 ret = check_device_access_params(devip, lba, num);
2090 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2091 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2094 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2095 return illegal_condition_result;
2099 write_lock_irqsave(&atomic_rw, iflags);
2100 ret = do_device_access(SCpnt, devip, lba, num, 1);
2101 if (scsi_debug_lbp())
2102 map_region(lba, num);
2103 write_unlock_irqrestore(&atomic_rw, iflags);
2105 return (DID_ERROR << 16);
2106 else if ((ret < (num * scsi_debug_sector_size)) &&
2107 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2108 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2109 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
/*
 * Respond to WRITE SAME (optionally with UNMAP): when unmapping and LBP
 * is enabled, just unmap the region; otherwise fetch one logical block
 * from the data-out buffer and replicate it across the remaining blocks
 * of the range, then mark the region mapped. Range length is capped by
 * scsi_debug_write_same_length. All store updates happen under the
 * atomic_rw write lock.
 */
2114 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2115 unsigned int num, struct sdebug_dev_info *devip,
2116 u32 ei_lba, unsigned int unmap)
2118 unsigned long iflags;
2119 unsigned long long i;
2122 ret = check_device_access_params(devip, lba, num);
2126 if (num > scsi_debug_write_same_length) {
2127 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2129 return check_condition_result;
2132 write_lock_irqsave(&atomic_rw, iflags);
2134 if (unmap && scsi_debug_lbp()) {
2135 unmap_region(lba, num);
2139 /* Else fetch one logical block */
2140 ret = fetch_to_dev_buffer(scmd,
2141 fake_storep + (lba * scsi_debug_sector_size),
2142 scsi_debug_sector_size);
2145 write_unlock_irqrestore(&atomic_rw, iflags);
2146 return (DID_ERROR << 16);
2147 } else if ((ret < (num * scsi_debug_sector_size)) &&
2148 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2149 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2150 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2152 /* Copy first sector to remaining blocks */
2153 for (i = 1 ; i < num ; i++)
2154 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2155 fake_storep + (lba * scsi_debug_sector_size),
2156 scsi_debug_sector_size);
2158 if (scsi_debug_lbp())
2159 map_region(lba, num);
2161 write_unlock_irqrestore(&atomic_rw, iflags);
2166 struct unmap_block_desc {
/*
 * Respond to UNMAP: copy the parameter list out of the data buffer,
 * validate its header lengths against the CDB-declared payload length
 * (BUG_ON mismatches), then unmap each 16-byte block descriptor's
 * LBA/length range after range-checking it.
 */
2172 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2175 struct unmap_block_desc *desc;
2176 unsigned int i, payload_len, descriptors;
2179 ret = check_readiness(scmd, 1, devip);
2183 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2184 BUG_ON(scsi_bufflen(scmd) != payload_len);
2186 descriptors = (payload_len - 8) / 16;
2188 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2190 return check_condition_result;
2192 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
/* header: data length and block-descriptor length must be consistent */
2194 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2195 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2197 desc = (void *)&buf[8];
2199 for (i = 0 ; i < descriptors ; i++) {
2200 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2201 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2203 ret = check_device_access_params(devip, lba, num);
2207 unmap_region(lba, num);
2218 #define SDEBUG_GET_LBA_STATUS_LEN 32
/*
 * Respond to GET LBA STATUS: report one descriptor covering the run of
 * blocks starting at the requested LBA that share the same
 * mapped/unmapped state (from map_state()).
 */
2220 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2221 struct sdebug_dev_info * devip)
2223 unsigned long long lba;
2224 unsigned int alloc_len, mapped, num;
2225 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2228 ret = check_readiness(scmd, 1, devip);
2232 lba = get_unaligned_be64(&scmd->cmnd[2]);
2233 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2238 ret = check_device_access_params(devip, lba, 1);
2242 mapped = map_state(lba, &num);
2244 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2245 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
2246 put_unaligned_be64(lba, &arr[8]); /* LBA */
2247 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2248 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2250 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2253 #define SDEBUG_RLUN_ARR_SZ 256
/*
 * Respond to REPORT LUNS: list scsi_debug_max_luns LUNs (optionally
 * skipping LUN 0) plus the well-known REPORT LUNS LUN when
 * select_report > 0. Rejects allocation lengths < 4 and select_report
 * values > 2.
 */
2255 static int resp_report_luns(struct scsi_cmnd * scp,
2256 struct sdebug_dev_info * devip)
2258 unsigned int alloc_len;
2259 int lun_cnt, i, upper, num, n, wlun, lun;
2260 unsigned char *cmd = (unsigned char *)scp->cmnd;
2261 int select_report = (int)cmd[2];
2262 struct scsi_lun *one_lun;
2263 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2264 unsigned char * max_addr;
2266 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2267 if ((alloc_len < 4) || (select_report > 2)) {
2268 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2270 return check_condition_result;
2272 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2273 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2274 lun_cnt = scsi_debug_max_luns;
2275 if (1 == select_report)
2277 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2279 wlun = (select_report > 0) ? 1 : 0;
2280 num = lun_cnt + wlun;
/* bytes 0-3: LUN LIST LENGTH (8 bytes per LUN) */
2281 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2282 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2283 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2284 sizeof(struct scsi_lun)), num);
2289 one_lun = (struct scsi_lun *) &arr[8];
2290 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2291 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2292 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
/* encode each LUN in SAM-2 flat-address format */
2294 upper = (lun >> 8) & 0x3f;
2296 one_lun[i].scsi_lun[0] =
2297 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2298 one_lun[i].scsi_lun[1] = lun & 0xff;
/* optionally append the well-known REPORT LUNS LUN */
2301 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2302 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2305 alloc_len = (unsigned char *)(one_lun + i) - arr;
2306 return fill_from_dev_buffer(scp, arr,
2307 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
/*
 * Respond to XDWRITEREAD: XOR the incoming data-out buffer into the
 * data-in scatter-gather list in place (the disk-XOR semantics of the
 * command). Uses a temporary kmalloc'd copy of the outgoing data; pages
 * are mapped with kmap_atomic while XOR-ing.
 */
2310 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2311 unsigned int num, struct sdebug_dev_info *devip)
2314 unsigned char *kaddr, *buf;
2315 unsigned int offset;
2316 struct scatterlist *sg;
2317 struct scsi_data_buffer *sdb = scsi_in(scp);
2319 /* better not to use temporary buffer. */
2320 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2324 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2327 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2328 kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2332 for (j = 0; j < sg->length; j++)
2333 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2335 offset += sg->length;
2336 kunmap_atomic(kaddr);
2345 /* When timer goes off this function is called. */
/*
 * Timer callback for delayed command completion: look up the queued
 * command slot @indx under queued_arr_lock, set its stashed result, and
 * invoke the midlayer done callback. Logs and bails on a bad index or
 * an unused slot.
 */
2346 static void timer_intr_handler(unsigned long indx)
2348 struct sdebug_queued_cmd * sqcp;
2349 unsigned long iflags;
2351 if (indx >= scsi_debug_max_queue) {
2352 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2356 spin_lock_irqsave(&queued_arr_lock, iflags);
2357 sqcp = &queued_arr[(int)indx];
2358 if (! sqcp->in_use) {
2359 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2361 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2365 if (sqcp->done_funct) {
2366 sqcp->a_cmnd->result = sqcp->scsi_result;
2367 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2369 sqcp->done_funct = NULL;
2370 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Allocate a zeroed per-device info structure, attach it to the given
 * simulated host and link it onto that host's device list.
 */
2374 static struct sdebug_dev_info *
2375 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2377 struct sdebug_dev_info *devip;
2379 devip = kzalloc(sizeof(*devip), flags);
2381 devip->sdbg_host = sdbg_host;
2382 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/*
 * Find (or lazily create) the sdebug_dev_info for a scsi_device.
 * Search order: an in-use entry matching <channel, target, lun>; else
 * the first unused entry; else allocate a fresh one with GFP_ATOMIC.
 * On (re)use, initializes the per-device sense buffer in either
 * descriptor (0x72) or fixed (0x70) format per the dsense option.
 */
2387 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2389 struct sdebug_host_info * sdbg_host;
2390 struct sdebug_dev_info * open_devip = NULL;
2391 struct sdebug_dev_info * devip =
2392 (struct sdebug_dev_info *)sdev->hostdata;
/* host private data holds a pointer to our sdebug_host_info */
2396 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2398 printk(KERN_ERR "Host info NULL\n");
2401 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2402 if ((devip->used) && (devip->channel == sdev->channel) &&
2403 (devip->target == sdev->id) &&
2404 (devip->lun == sdev->lun))
/* remember the first free slot in case no exact match is found */
2407 if ((!devip->used) && (!open_devip))
2411 if (!open_devip) { /* try and make a new one */
2412 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2414 printk(KERN_ERR "%s: out of memory at line %d\n",
2415 __func__, __LINE__);
2420 open_devip->channel = sdev->channel;
2421 open_devip->target = sdev->id;
2422 open_devip->lun = sdev->lun;
2423 open_devip->sdbg_host = sdbg_host;
2424 open_devip->reset = 1;
2425 open_devip->used = 1;
2426 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
/* 0x72 = descriptor sense format, 0x70 = fixed format */
2427 if (scsi_debug_dsense)
2428 open_devip->sense_buff[0] = 0x72;
2430 open_devip->sense_buff[0] = 0x70;
2431 open_devip->sense_buff[7] = 0xa; /* fixed: additional sense length */
2433 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2434 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
/*
 * Host template slave_alloc: optionally log the device address and mark
 * the request queue as supporting bidirectional (BIDI) commands, needed
 * for XDWRITEREAD emulation.
 */
2439 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2441 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2442 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2443 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2444 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
/*
 * Host template slave_configure: bind a sdebug_dev_info to the device,
 * set the max CDB length, queue depth and segment size limits.
 * Returns 1 (device goes offline) when no dev info can be obtained.
 */
2448 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2450 struct sdebug_dev_info *devip;
2452 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2453 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2454 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2455 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2456 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2457 devip = devInfoReg(sdp);
2459 return 1; /* no resources, will be marked offline */
2460 sdp->hostdata = devip;
2461 if (sdp->host->cmd_per_lun)
2462 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2463 sdp->host->cmd_per_lun);
2464 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2465 if (scsi_debug_no_uld)
2466 sdp->no_uld_attach = 1; /* keep upper-level drivers (e.g. sd) away */
/*
 * Host template slave_destroy: release the device's sdebug_dev_info
 * slot back to the pool (it is reused, not freed) and clear hostdata.
 */
2470 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2472 struct sdebug_dev_info *devip =
2473 (struct sdebug_dev_info *)sdp->hostdata;
2475 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2476 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2477 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2479 /* make this slot available for re-use */
2481 sdp->hostdata = NULL;
2485 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
/*
 * Scan queued_arr for the slot holding 'cmnd'; if found, cancel its
 * delayed-response timer and free the slot.
 *
 * NOTE(review): del_timer_sync() is called while queued_arr_lock is
 * held, and timer_intr_handler() takes the same lock -- if the timer
 * callback is running concurrently this looks like a deadlock; confirm
 * against the full source / later fixes.
 */
2486 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2488 unsigned long iflags;
2490 struct sdebug_queued_cmd *sqcp;
2492 spin_lock_irqsave(&queued_arr_lock, iflags);
2493 for (k = 0; k < scsi_debug_max_queue; ++k) {
2494 sqcp = &queued_arr[k];
2495 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2496 del_timer_sync(&sqcp->cmnd_timer);
2498 sqcp->a_cmnd = NULL;
2502 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/* k < max_queue means the loop broke out on a match */
2503 return (k < scsi_debug_max_queue) ? 1 : 0;
2506 /* Deletes (stops) timers of all queued commands */
/*
 * Cancel the response timers of every in-use queued command and free
 * their slots (used on bus/host reset and module exit).
 *
 * NOTE(review): same del_timer_sync()-under-queued_arr_lock pattern as
 * stop_queued_cmnd() -- potential deadlock with timer_intr_handler();
 * confirm.
 */
2507 static void stop_all_queued(void)
2509 unsigned long iflags;
2511 struct sdebug_queued_cmd *sqcp;
2513 spin_lock_irqsave(&queued_arr_lock, iflags);
2514 for (k = 0; k < scsi_debug_max_queue; ++k) {
2515 sqcp = &queued_arr[k];
2516 if (sqcp->in_use && sqcp->a_cmnd) {
2517 del_timer_sync(&sqcp->cmnd_timer);
2519 sqcp->a_cmnd = NULL;
2522 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Error-handler abort: cancel the pending delayed response (if any)
 * for the given command.
 */
2525 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2527 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2528 printk(KERN_INFO "scsi_debug: abort\n");
2530 stop_queued_cmnd(SCpnt);
/*
 * BIOS geometry: prefer the geometry recorded in an on-"disk" partition
 * table (scsi_bios_ptable/scsi_partsize); otherwise fall back to the
 * simulated heads/sectors/cylinders. info[] = {heads, sectors, cyls}.
 */
2534 static int scsi_debug_biosparam(struct scsi_device *sdev,
2535 struct block_device * bdev, sector_t capacity, int *info)
2540 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2541 printk(KERN_INFO "scsi_debug: biosparam\n");
2542 buf = scsi_bios_ptable(bdev);
2544 res = scsi_partsize(buf, capacity,
2545 &info[2], &info[0], &info[1]);
/* fallback: the geometry we fabricated at init time */
2550 info[0] = sdebug_heads;
2551 info[1] = sdebug_sectors_per;
2552 info[2] = sdebug_cylinders_per;
/*
 * Error-handler device reset: mark the device's info so a subsequent
 * command can report a reset unit attention.
 */
2556 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2558 struct sdebug_dev_info * devip;
2560 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2561 printk(KERN_INFO "scsi_debug: device_reset\n");
2564 devip = devInfoReg(SCpnt->device);
/*
 * Error-handler bus reset: flag 'reset' on every simulated device that
 * shares the command's host, so each reports a unit attention later.
 */
2571 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2573 struct sdebug_host_info *sdbg_host;
2574 struct sdebug_dev_info * dev_info;
2575 struct scsi_device * sdp;
2576 struct Scsi_Host * hp;
2578 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2579 printk(KERN_INFO "scsi_debug: bus_reset\n");
2581 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2582 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2584 list_for_each_entry(dev_info,
2585 &sdbg_host->dev_info_list,
2587 dev_info->reset = 1;
/*
 * Error-handler host reset: flag 'reset' on every device of every
 * simulated host, under sdebug_host_list_lock.
 */
2593 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2595 struct sdebug_host_info * sdbg_host;
2596 struct sdebug_dev_info * dev_info;
2598 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2599 printk(KERN_INFO "scsi_debug: host_reset\n");
2601 spin_lock(&sdebug_host_list_lock);
2602 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2603 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2605 dev_info->reset = 1;
2607 spin_unlock(&sdebug_host_list_lock);
2612 /* Initializes timers in queued array */
/*
 * One-time init: set up each queued_arr slot's timer and mark it free.
 */
2613 static void __init init_all_queued(void)
2615 unsigned long iflags;
2617 struct sdebug_queued_cmd * sqcp;
2619 spin_lock_irqsave(&queued_arr_lock, iflags);
2620 for (k = 0; k < scsi_debug_max_queue; ++k) {
2621 sqcp = &queued_arr[k];
2622 init_timer(&sqcp->cmnd_timer);
2624 sqcp->a_cmnd = NULL;
2626 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Write a DOS/MBR partition table into the first sector of the ram
 * disk, splitting the store (minus track 0) into scsi_debug_num_parts
 * roughly equal, track-aligned partitions of type 0x83 (Linux).
 * Skipped if fewer than 1 partition requested or store < 1 MB.
 */
2629 static void __init sdebug_build_parts(unsigned char *ramp,
2630 unsigned long store_size)
2632 struct partition * pp;
2633 int starts[SDEBUG_MAX_PARTS + 2];
2634 int sectors_per_part, num_sectors, k;
2635 int heads_by_sects, start_sec, end_sec;
2637 /* assume partition table already zeroed */
2638 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2640 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2641 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2642 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2643 "partitions to %d\n", SDEBUG_MAX_PARTS);
2645 num_sectors = (int)sdebug_store_sectors;
/* first track (sdebug_sectors_per) is reserved for the MBR */
2646 sectors_per_part = (num_sectors - sdebug_sectors_per)
2647 / scsi_debug_num_parts;
2648 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2649 starts[0] = sdebug_sectors_per;
/* cylinder-align each start; starts[] is terminated with 0 */
2650 for (k = 1; k < scsi_debug_num_parts; ++k)
2651 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2653 starts[scsi_debug_num_parts] = num_sectors;
2654 starts[scsi_debug_num_parts + 1] = 0;
2656 ramp[510] = 0x55; /* magic partition markings */
/* entries live at the conventional MBR offset 0x1be */
2658 pp = (struct partition *)(ramp + 0x1be);
2659 for (k = 0; starts[k + 1]; ++k, ++pp) {
2660 start_sec = starts[k];
2661 end_sec = starts[k + 1] - 1;
/* legacy CHS fields, derived from the fabricated geometry */
2664 pp->cyl = start_sec / heads_by_sects;
2665 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2666 / sdebug_sectors_per;
2667 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2669 pp->end_cyl = end_sec / heads_by_sects;
2670 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2671 / sdebug_sectors_per;
2672 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
/* LBA fields are little-endian on disk */
2674 pp->start_sect = cpu_to_le32(start_sec);
2675 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
2676 pp->sys_ind = 0x83; /* plain Linux partition */
/*
 * Deliver (or schedule) a command's response. With delta_jiff <= 0 the
 * result is set and the command completed immediately; otherwise a free
 * queued_arr slot is claimed and a timer armed to fire
 * timer_intr_handler() after delta_jiff jiffies. CHECK CONDITION status
 * also copies the device's simulated sense data into the command.
 * Returns 1 (busy) when no queue slot is free.
 */
2680 static int schedule_resp(struct scsi_cmnd * cmnd,
2681 struct sdebug_dev_info * devip,
2682 done_funct_t done, int scsi_result, int delta_jiff)
2684 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2686 struct scsi_device * sdp = cmnd->device;
2688 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2689 "non-zero result=0x%x\n", sdp->host->host_no,
2690 sdp->channel, sdp->id, sdp->lun, scsi_result);
2693 if (cmnd && devip) {
2694 /* simulate autosense by this driver */
2695 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2696 memcpy(cmnd->sense_buffer, devip->sense_buff,
2697 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2698 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
/* no delay requested: complete synchronously */
2700 if (delta_jiff <= 0) {
2702 cmnd->result = scsi_result;
2707 unsigned long iflags;
2709 struct sdebug_queued_cmd * sqcp = NULL;
/* find a free slot in the queued-command array */
2711 spin_lock_irqsave(&queued_arr_lock, iflags);
2712 for (k = 0; k < scsi_debug_max_queue; ++k) {
2713 sqcp = &queued_arr[k];
2717 if (k >= scsi_debug_max_queue) {
2718 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2719 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2720 return 1; /* report busy to mid level */
/* stash command + result, then arm the per-slot timer */
2723 sqcp->a_cmnd = cmnd;
2724 sqcp->scsi_result = scsi_result;
2725 sqcp->done_funct = done;
2726 sqcp->cmnd_timer.function = timer_intr_handler;
2727 sqcp->cmnd_timer.data = k; /* handler receives the slot index */
2728 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2729 add_timer(&sqcp->cmnd_timer);
2730 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2736 /* Note: The following macros create attribute files in the
2737 /sys/module/scsi_debug/parameters directory. Unfortunately this
2738 driver is unaware of a change and cannot trigger auxiliary actions
2739 as it can when the corresponding attribute in the
2740 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
/* Module parameters; writable ones (S_IWUSR) can also be changed at
 * runtime through /sys/module/scsi_debug/parameters. */
2742 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2743 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2744 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2745 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2746 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2747 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2748 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2749 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2750 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2751 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
2752 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2753 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2754 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2755 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2756 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2757 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2758 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2759 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2760 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2761 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2762 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2763 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2764 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2765 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2766 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2767 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2768 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2769 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2770 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2771 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2772 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2773 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2774 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2775 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2777 module_param_named(write_same_length, scsi_debug_write_same_length, int,
/* Module identity and one-line documentation for each parameter above. */
2780 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2781 MODULE_DESCRIPTION("SCSI debug adapter driver");
2782 MODULE_LICENSE("GPL");
2783 MODULE_VERSION(SCSI_DEBUG_VERSION);
2785 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2786 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2787 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2788 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2789 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2790 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2791 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2792 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2793 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2794 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2795 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2796 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2797 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2798 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2799 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2800 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2801 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2802 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2803 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
2804 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2805 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2806 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
2807 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2808 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2809 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2810 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2811 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2812 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2813 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2814 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2815 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2816 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2817 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2818 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2819 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
/*
 * Host template .info: format a one-line description of the adapter.
 * NOTE(review): writes a shared static buffer with no serialization --
 * concurrent callers could race; confirm callers are serialized.
 */
2821 static char sdebug_info[256];
2823 static const char * scsi_debug_info(struct Scsi_Host * shp)
2825 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2826 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2827 scsi_debug_version_date, scsi_debug_dev_size_mb,
2832 /* scsi_debug_proc_info
2833 * Used if the driver currently has no own support for /proc/scsi
/*
 * /proc/scsi write handler: parse an integer from the (privileged)
 * writer and install it as the new scsi_debug_opts; restart the
 * every_nth command counter when that feature is active.
 */
2835 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
/* copy at most 15 bytes so the local buffer is never overrun */
2839 int minLen = length > 15 ? 15 : length;
2841 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2843 memcpy(arr, buffer, minLen);
2845 if (1 != sscanf(arr, "%d", &opts))
2847 scsi_debug_opts = opts;
2848 if (scsi_debug_every_nth != 0)
2849 scsi_debug_cmnd_count = 0;
/*
 * /proc/scsi read handler: dump current configuration, fabricated
 * geometry and the error/reset statistics counters.
 */
2853 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
2855 seq_printf(m, "scsi_debug adapter driver, version "
2857 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2858 "every_nth=%d(curr:%d)\n"
2859 "delay=%d, max_luns=%d, scsi_level=%d\n"
2860 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2861 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2862 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2863 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2864 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2865 scsi_debug_cmnd_count, scsi_debug_delay,
2866 scsi_debug_max_luns, scsi_debug_scsi_level,
2867 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2868 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2869 num_host_resets, dix_reads, dix_writes, dif_errors);
/* sysfs driver attribute 'delay': response delay in jiffies (>= 0). */
2873 static ssize_t delay_show(struct device_driver *ddp, char *buf)
2875 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2878 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
/* accept only a non-negative decimal integer */
2884 if (1 == sscanf(buf, "%10s", work)) {
2885 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2886 scsi_debug_delay = delay;
2892 static DRIVER_ATTR_RW(delay);
/*
 * sysfs driver attribute 'opts': error-injection option bitmask;
 * accepts "0x<hex>" or decimal input. Writing also restarts the
 * every_nth command counter.
 * NOTE(review): strnicmp() was deprecated in favour of strncasecmp()
 * in later kernels -- candidate cleanup when rebasing.
 */
2894 static ssize_t opts_show(struct device_driver *ddp, char *buf)
2896 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2899 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
2905 if (1 == sscanf(buf, "%10s", work)) {
2906 if (0 == strnicmp(work,"0x", 2)) {
2907 if (1 == sscanf(&work[2], "%x", &opts))
2910 if (1 == sscanf(work, "%d", &opts))
2916 scsi_debug_opts = opts;
2917 scsi_debug_cmnd_count = 0;
2920 static DRIVER_ATTR_RW(opts);
/* sysfs 'ptype': simulated SCSI peripheral device type (>= 0). */
2922 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
2924 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2926 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
2931 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2932 scsi_debug_ptype = n;
2937 static DRIVER_ATTR_RW(ptype);
/* sysfs 'dsense': non-zero selects descriptor sense format. */
2939 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
2941 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2943 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
2948 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2949 scsi_debug_dsense = n;
2954 static DRIVER_ATTR_RW(dsense);
/* sysfs 'fake_rw': non-zero skips the ram-disk data copy on I/O. */
2956 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
2958 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2960 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
2965 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2966 scsi_debug_fake_rw = n;
2971 static DRIVER_ATTR_RW(fake_rw);
/* sysfs 'no_lun_0': non-zero suppresses LUN 0 on each target. */
2973 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
2975 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2977 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
2982 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2983 scsi_debug_no_lun_0 = n;
2988 static DRIVER_ATTR_RW(no_lun_0);
/* sysfs 'num_tgts': targets per host; change triggers a re-scan. */
2990 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
2992 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2994 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
2999 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3000 scsi_debug_num_tgts = n;
3001 sdebug_max_tgts_luns();
3006 static DRIVER_ATTR_RW(num_tgts);
/* read-only mirrors of the dev_size_mb and num_parts parameters */
3008 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3010 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3012 static DRIVER_ATTR_RO(dev_size_mb);
3014 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3016 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3018 static DRIVER_ATTR_RO(num_parts);
/* sysfs 'every_nth': inject an error on every nth command; writing
 * restarts the command counter. */
3020 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3022 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3024 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3029 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3030 scsi_debug_every_nth = nth;
3031 scsi_debug_cmnd_count = 0;
3036 static DRIVER_ATTR_RW(every_nth);
/* sysfs 'max_luns': LUNs per target; change triggers a re-scan. */
3038 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3040 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3042 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3047 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3048 scsi_debug_max_luns = n;
3049 sdebug_max_tgts_luns();
3054 static DRIVER_ATTR_RW(max_luns);
/* sysfs 'max_queue': queued command limit, 1..SCSI_DEBUG_CANQUEUE. */
3056 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3058 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3060 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3065 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3066 (n <= SCSI_DEBUG_CANQUEUE)) {
3067 scsi_debug_max_queue = n;
3072 static DRIVER_ATTR_RW(max_queue);
/* read-only mirrors of the no_uld and scsi_level parameters */
3074 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3076 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3078 static DRIVER_ATTR_RO(no_uld);
3080 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3082 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3084 static DRIVER_ATTR_RO(scsi_level);
/* sysfs 'virtual_gb': reported capacity override; recomputes the
 * simulated capacity when changed. */
3086 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3088 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3090 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3095 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3096 scsi_debug_virtual_gb = n;
3098 sdebug_capacity = get_sdebug_capacity();
3104 static DRIVER_ATTR_RW(virtual_gb);
/* sysfs 'add_host': writing +N adds N simulated adapters, -N removes
 * N; the count of live adapters is kept in scsi_debug_add_host. */
3106 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3108 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3111 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3116 if (sscanf(buf, "%d", &delta_hosts) != 1)
3118 if (delta_hosts > 0) {
3120 sdebug_add_adapter();
3121 } while (--delta_hosts);
3122 } else if (delta_hosts < 0) {
3124 sdebug_remove_adapter();
3125 } while (++delta_hosts);
3129 static DRIVER_ATTR_RW(add_host);
/* sysfs 'vpd_use_hostno': include host number in VPD device ids. */
3131 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3133 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3135 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3140 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3141 scsi_debug_vpd_use_hostno = n;
3146 static DRIVER_ATTR_RW(vpd_use_hostno);
/* read-only mirrors of the sector_size, dix, dif, guard and ato
 * parameters (fixed for the lifetime of the module) */
3148 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3150 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3152 static DRIVER_ATTR_RO(sector_size);
3154 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3156 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3158 static DRIVER_ATTR_RO(dix);
3160 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3162 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3164 static DRIVER_ATTR_RO(dif);
3166 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3168 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3170 static DRIVER_ATTR_RO(guard);
3172 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3174 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3176 static DRIVER_ATTR_RO(ato);
/*
 * sysfs 'map' (read-only): provisioning map as a list of mapped block
 * ranges; without LBP everything is reported mapped.
 * NOTE(review): buf[count++] = '\n' after bitmap_scnlistprintf() --
 * confirm count is guaranteed < PAGE_SIZE at that point.
 */
3178 static ssize_t map_show(struct device_driver *ddp, char *buf)
3182 if (!scsi_debug_lbp())
3183 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3184 sdebug_store_sectors);
3186 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3188 buf[count++] = '\n';
3193 static DRIVER_ATTR_RO(map);
/* sysfs 'removable': claim removable media; any positive value -> 1. */
3195 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3197 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3199 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3204 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3205 scsi_debug_removable = (n > 0);
3210 static DRIVER_ATTR_RW(removable);
3212 /* Note: The following array creates attribute files in the
3213 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3214 files (over those found in the /sys/module/scsi_debug/parameters
3215 directory) is that auxiliary actions can be triggered when an attribute
3216 is changed. For example see: sdebug_add_host_store() above.
/* Attribute table registered via ATTRIBUTE_GROUPS(sdebug_drv) below. */
3219 static struct attribute *sdebug_drv_attrs[] = {
3220 &driver_attr_delay.attr,
3221 &driver_attr_opts.attr,
3222 &driver_attr_ptype.attr,
3223 &driver_attr_dsense.attr,
3224 &driver_attr_fake_rw.attr,
3225 &driver_attr_no_lun_0.attr,
3226 &driver_attr_num_tgts.attr,
3227 &driver_attr_dev_size_mb.attr,
3228 &driver_attr_num_parts.attr,
3229 &driver_attr_every_nth.attr,
3230 &driver_attr_max_luns.attr,
3231 &driver_attr_max_queue.attr,
3232 &driver_attr_no_uld.attr,
3233 &driver_attr_scsi_level.attr,
3234 &driver_attr_virtual_gb.attr,
3235 &driver_attr_add_host.attr,
3236 &driver_attr_vpd_use_hostno.attr,
3237 &driver_attr_sector_size.attr,
3238 &driver_attr_dix.attr,
3239 &driver_attr_dif.attr,
3240 &driver_attr_guard.attr,
3241 &driver_attr_ato.attr,
3242 &driver_attr_map.attr,
3243 &driver_attr_removable.attr,
3246 ATTRIBUTE_GROUPS(sdebug_drv);
/* root device that parents all simulated adapters */
3248 static struct device *pseudo_primary;
/*
 * Module init: validate parameters (sector size, DIF/DIX, guard, ato,
 * physblk_exp, lowest_aligned), size and allocate the shared ram disk
 * plus optional DIF and provisioning-map stores, fabricate a disk
 * geometry, register the pseudo root device / bus / driver, and add
 * the initial set of adapters.  Unwinds registrations on failure.
 */
3250 static int __init scsi_debug_init(void)
3257 switch (scsi_debug_sector_size) {
3264 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3265 scsi_debug_sector_size);
3269 switch (scsi_debug_dif) {
3271 case SD_DIF_TYPE0_PROTECTION:
3272 case SD_DIF_TYPE1_PROTECTION:
3273 case SD_DIF_TYPE2_PROTECTION:
3274 case SD_DIF_TYPE3_PROTECTION:
3278 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3282 if (scsi_debug_guard > 1) {
3283 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3287 if (scsi_debug_ato > 1) {
3288 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3292 if (scsi_debug_physblk_exp > 15) {
3293 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3294 scsi_debug_physblk_exp);
3298 if (scsi_debug_lowest_aligned > 0x3fff) {
3299 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3300 scsi_debug_lowest_aligned);
3304 if (scsi_debug_dev_size_mb < 1)
3305 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3306 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3307 sdebug_store_sectors = sz / scsi_debug_sector_size;
3308 sdebug_capacity = get_sdebug_capacity();
3310 /* play around with geometry, don't waste too much on track 0 */
3312 sdebug_sectors_per = 32;
/* NOTE(review): the '>= 256' test below can never match once
 * '>= 16' has -- branch ordering looks inverted; confirm against
 * the full source. */
3313 if (scsi_debug_dev_size_mb >= 16)
3315 else if (scsi_debug_dev_size_mb >= 256)
3317 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3318 (sdebug_sectors_per * sdebug_heads);
3319 if (sdebug_cylinders_per >= 1024) {
3320 /* other LLDs do this; implies >= 1GB ram disk ... */
3322 sdebug_sectors_per = 63;
3323 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3324 (sdebug_sectors_per * sdebug_heads);
/* the shared ram disk backing every simulated device */
3327 fake_storep = vmalloc(sz);
3328 if (NULL == fake_storep) {
3329 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3332 memset(fake_storep, 0, sz);
3333 if (scsi_debug_num_parts > 0)
3334 sdebug_build_parts(fake_storep, sz);
/* optional per-sector protection information store (DIX) */
3336 if (scsi_debug_dix) {
3339 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3340 dif_storep = vmalloc(dif_size);
3342 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3343 dif_size, dif_storep);
3345 if (dif_storep == NULL) {
3346 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3351 memset(dif_storep, 0xff, dif_size);
3354 /* Logical Block Provisioning */
3355 if (scsi_debug_lbp()) {
3356 scsi_debug_unmap_max_blocks =
3357 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3359 scsi_debug_unmap_max_desc =
3360 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3362 scsi_debug_unmap_granularity =
3363 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3365 if (scsi_debug_unmap_alignment &&
3366 scsi_debug_unmap_granularity <=
3367 scsi_debug_unmap_alignment) {
3369 "%s: ERR: unmap_granularity <= unmap_alignment\n",
/* one bit per provisioning block */
3374 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3375 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3377 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3380 if (map_storep == NULL) {
3381 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3386 bitmap_zero(map_storep, map_size);
3388 /* Map first 1KB for partition table */
3389 if (scsi_debug_num_parts)
/* register the pseudo device-model plumbing the adapters hang off */
3393 pseudo_primary = root_device_register("pseudo_0");
3394 if (IS_ERR(pseudo_primary)) {
3395 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3396 ret = PTR_ERR(pseudo_primary);
3399 ret = bus_register(&pseudo_lld_bus);
3401 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3405 ret = driver_register(&sdebug_driverfs_driver);
3407 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
/* sdebug_add_adapter() increments scsi_debug_add_host itself, so
 * zero the counter first and add back the requested number */
3414 host_to_add = scsi_debug_add_host;
3415 scsi_debug_add_host = 0;
3417 for (k = 0; k < host_to_add; k++) {
3418 if (sdebug_add_adapter()) {
3419 printk(KERN_ERR "scsi_debug_init: "
3420 "sdebug_add_adapter failed k=%d\n", k);
3425 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3426 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3427 scsi_debug_add_host);
/* error unwind: undo registrations in reverse order */
3432 bus_unregister(&pseudo_lld_bus);
3434 root_device_unregister(pseudo_primary);
/*
 * Module exit: tear down every remaining adapter, then unregister the
 * driver, bus and pseudo root device (reverse of scsi_debug_init).
 */
3445 static void __exit scsi_debug_exit(void)
3447 int k = scsi_debug_add_host;
3451 sdebug_remove_adapter();
3452 driver_unregister(&sdebug_driverfs_driver);
3453 bus_unregister(&pseudo_lld_bus);
3454 root_device_unregister(pseudo_primary);
/* device_initcall rather than module_init: run late when built in */
3462 device_initcall(scsi_debug_init);
3463 module_exit(scsi_debug_exit);
/* Device-model release callback: free the containing host structure. */
3465 static void sdebug_release_adapter(struct device * dev)
3467 struct sdebug_host_info *sdbg_host;
3469 sdbg_host = to_sdebug_host(dev);
/*
 * Create one simulated adapter: allocate a host info, pre-create
 * num_tgts * max_luns device infos, link it on sdebug_host_list, and
 * register it with the device model (which triggers host probe).
 * On failure, frees the partially built device list.
 */
3473 static int sdebug_add_adapter(void)
3475 int k, devs_per_host;
3477 struct sdebug_host_info *sdbg_host;
3478 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3480 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3481 if (NULL == sdbg_host) {
3482 printk(KERN_ERR "%s: out of memory at line %d\n",
3483 __func__, __LINE__);
3487 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3489 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3490 for (k = 0; k < devs_per_host; k++) {
3491 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3492 if (!sdbg_devinfo) {
3493 printk(KERN_ERR "%s: out of memory at line %d\n",
3494 __func__, __LINE__);
3500 spin_lock(&sdebug_host_list_lock);
3501 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3502 spin_unlock(&sdebug_host_list_lock);
/* registering on pseudo_lld_bus causes the host to be probed */
3504 sdbg_host->dev.bus = &pseudo_lld_bus;
3505 sdbg_host->dev.parent = pseudo_primary;
3506 sdbg_host->dev.release = &sdebug_release_adapter;
3507 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3509 error = device_register(&sdbg_host->dev);
3514 ++scsi_debug_add_host;
/* error path: dismantle the device list built above */
3518 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3520 list_del(&sdbg_devinfo->dev_list);
3521 kfree(sdbg_devinfo);
/*
 * Remove the most recently added adapter: unlink it from
 * sdebug_host_list under the lock, then unregister its device (final
 * put runs sdebug_release_adapter) and decrement the host count.
 */
3528 static void sdebug_remove_adapter(void)
3530 struct sdebug_host_info * sdbg_host = NULL;
3532 spin_lock(&sdebug_host_list_lock);
3533 if (!list_empty(&sdebug_host_list)) {
3534 sdbg_host = list_entry(sdebug_host_list.prev,
3535 struct sdebug_host_info, host_list);
3536 list_del(&sdbg_host->host_list);
3538 spin_unlock(&sdebug_host_list_lock);
3543 device_unregister(&sdbg_host->dev);
3544 --scsi_debug_add_host;
/*
 * scsi_debug_queuecommand_lck - main SCSI command dispatcher of the fake HBA.
 *
 * Validates the addressed target/LUN, optionally injects errors every Nth
 * command (per scsi_debug_every_nth / scsi_debug_opts), then switches on the
 * CDB opcode and routes to the matching resp_*() emulation routine.  The
 * resulting status is queued for (possibly delayed) completion through
 * schedule_resp(), which invokes done().
 *
 * Called with the host lock held (wrapped by DEF_SCSI_QCMD below).
 *
 * NOTE(review): many lines are elided from this excerpt -- most case labels,
 * break statements and closing braces of the big switch, plus declarations
 * of k, len, num, ei_lba, errsts, unmap, inj_dif, inj_dix.  Tokens below are
 * kept verbatim from the excerpt.
 */
3548 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3550 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3553 unsigned long long lba;
3556 int target = SCpnt->device->id;
3557 struct sdebug_dev_info *devip = NULL;
3558 int inj_recovered = 0;
3559 int inj_transport = 0;
3562 int delay_override = 0;
3565 scsi_set_resid(SCpnt, 0);
/* optional trace: hex-dump the CDB when the NOISE debug option is set */
3566 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3567 printk(KERN_INFO "scsi_debug: cmd ");
3568 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3569 printk("%02x ", (int)cmd[k]);
/* reject commands addressed to the initiator's own id */
3573 if (target == SCpnt->device->host->hostt->this_id) {
3574 printk(KERN_INFO "scsi_debug: initiator's id used as "
3576 return schedule_resp(SCpnt, NULL, done,
3577 DID_NO_CONNECT << 16, 0);
/* reject LUNs beyond the configured maximum (well-known LUN excepted) */
3580 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3581 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3582 return schedule_resp(SCpnt, NULL, done,
3583 DID_NO_CONNECT << 16, 0);
3584 devip = devInfoReg(SCpnt->device);
3586 return schedule_resp(SCpnt, NULL, done,
3587 DID_NO_CONNECT << 16, 0);
/* every-Nth error injection: decide what, if anything, to inject */
3589 if ((scsi_debug_every_nth != 0) &&
3590 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3591 scsi_debug_cmnd_count = 0;
3592 if (scsi_debug_every_nth < -1)
3593 scsi_debug_every_nth = -1;
3594 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3595 return 0; /* ignore command causing timeout */
3596 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3597 scsi_medium_access_command(SCpnt))
3598 return 0; /* time out reads and writes */
3599 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3600 inj_recovered = 1; /* to reads and writes below */
3601 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3602 inj_transport = 1; /* to reads and writes below */
3603 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3604 inj_dif = 1; /* to reads and writes below */
3605 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3606 inj_dix = 1; /* to reads and writes below */
/* well-known LUN: only a small set of opcodes is legal */
3613 case TEST_UNIT_READY:
3615 break; /* only allowable wlun commands */
3617 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3618 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3619 "not supported for wlun\n", *cmd);
3620 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3622 errsts = check_condition_result;
3623 return schedule_resp(SCpnt, devip, done, errsts,
/* main opcode dispatch switch (several case labels elided) */
3629 case INQUIRY: /* mandatory, ignore unit attention */
3631 errsts = resp_inquiry(SCpnt, target, devip);
3633 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3635 errsts = resp_requests(SCpnt, devip);
3637 case REZERO_UNIT: /* actually this is REWIND for SSC */
3639 errsts = resp_start_stop(SCpnt, devip);
3641 case ALLOW_MEDIUM_REMOVAL:
3642 errsts = check_readiness(SCpnt, 1, devip);
3645 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3646 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3647 cmd[4] ? "inhibited" : "enabled");
3649 case SEND_DIAGNOSTIC: /* mandatory */
3650 errsts = check_readiness(SCpnt, 1, devip);
3652 case TEST_UNIT_READY: /* mandatory */
3654 errsts = check_readiness(SCpnt, 0, devip);
3657 errsts = check_readiness(SCpnt, 1, devip);
3660 errsts = check_readiness(SCpnt, 1, devip);
3663 errsts = check_readiness(SCpnt, 1, devip);
3666 errsts = check_readiness(SCpnt, 1, devip);
3669 errsts = resp_readcap(SCpnt, devip);
3671 case SERVICE_ACTION_IN:
3672 if (cmd[1] == SAI_READ_CAPACITY_16)
3673 errsts = resp_readcap16(SCpnt, devip);
3674 else if (cmd[1] == SAI_GET_LBA_STATUS) {
/* GET LBA STATUS only valid when logical block provisioning enabled */
3676 if (scsi_debug_lbp() == 0) {
3677 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3678 INVALID_COMMAND_OPCODE, 0);
3679 errsts = check_condition_result;
3681 errsts = resp_get_lba_status(SCpnt, devip);
3683 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3685 errsts = check_condition_result;
3688 case MAINTENANCE_IN:
3689 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3690 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3692 errsts = check_condition_result;
3695 errsts = resp_report_tgtpgs(SCpnt, devip);
3700 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3701 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3703 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3704 INVALID_COMMAND_OPCODE, 0);
3705 errsts = check_condition_result;
/* warn when an unprotected transfer hits a DIF-formatted device */
3709 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3710 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3711 (cmd[1] & 0xe0) == 0)
3712 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3717 errsts = check_readiness(SCpnt, 0, devip);
3720 if (scsi_debug_fake_rw)
3722 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3723 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
/* apply any error injection chosen above to a successful read */
3724 if (inj_recovered && (0 == errsts)) {
3725 mk_sense_buffer(devip, RECOVERED_ERROR,
3726 THRESHOLD_EXCEEDED, 0);
3727 errsts = check_condition_result;
3728 } else if (inj_transport && (0 == errsts)) {
3729 mk_sense_buffer(devip, ABORTED_COMMAND,
3730 TRANSPORT_PROBLEM, ACK_NAK_TO);
3731 errsts = check_condition_result;
3732 } else if (inj_dif && (0 == errsts)) {
3733 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3734 errsts = illegal_condition_result;
3735 } else if (inj_dix && (0 == errsts)) {
3736 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3737 errsts = illegal_condition_result;
3740 case REPORT_LUNS: /* mandatory, ignore unit attention */
3742 errsts = resp_report_luns(SCpnt, devip);
3744 case VERIFY: /* 10 byte SBC-2 command */
3745 errsts = check_readiness(SCpnt, 0, devip);
3750 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3751 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3753 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3754 INVALID_COMMAND_OPCODE, 0);
3755 errsts = check_condition_result;
3759 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3760 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3761 (cmd[1] & 0xe0) == 0)
3762 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3767 errsts = check_readiness(SCpnt, 0, devip);
3770 if (scsi_debug_fake_rw)
3772 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3773 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
/* same injection treatment for writes (no transport error case here) */
3774 if (inj_recovered && (0 == errsts)) {
3775 mk_sense_buffer(devip, RECOVERED_ERROR,
3776 THRESHOLD_EXCEEDED, 0);
3777 errsts = check_condition_result;
3778 } else if (inj_dif && (0 == errsts)) {
3779 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3780 errsts = illegal_condition_result;
3781 } else if (inj_dix && (0 == errsts)) {
3782 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3783 errsts = illegal_condition_result;
/* WRITE SAME only allowed when the matching LBP mode is enabled */
3789 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3790 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3791 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3792 INVALID_FIELD_IN_CDB, 0);
3793 errsts = check_condition_result;
3799 errsts = check_readiness(SCpnt, 0, devip);
3802 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3803 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3806 errsts = check_readiness(SCpnt, 0, devip);
/* UNMAP requires provisioning support and a nonzero descriptor limit */
3810 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3811 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3812 INVALID_COMMAND_OPCODE, 0);
3813 errsts = check_condition_result;
3815 errsts = resp_unmap(SCpnt, devip);
3819 errsts = resp_mode_sense(SCpnt, target, devip);
3822 errsts = resp_mode_select(SCpnt, 1, devip);
3824 case MODE_SELECT_10:
3825 errsts = resp_mode_select(SCpnt, 0, devip);
3828 errsts = resp_log_sense(SCpnt, devip);
3830 case SYNCHRONIZE_CACHE:
3832 errsts = check_readiness(SCpnt, 0, devip);
3835 errsts = check_readiness(SCpnt, 1, devip);
3837 case XDWRITEREAD_10:
/* XDWRITEREAD needs a bidirectional command descriptor */
3838 if (!scsi_bidi_cmnd(SCpnt)) {
3839 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3840 INVALID_FIELD_IN_CDB, 0);
3841 errsts = check_condition_result;
3845 errsts = check_readiness(SCpnt, 0, devip);
3848 if (scsi_debug_fake_rw)
3850 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3851 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3854 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3857 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3859 case VARIABLE_LENGTH_CMD:
3860 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3862 if ((cmd[10] & 0xe0) == 0)
3864 "Unprotected RD/WR to DIF device\n");
/* 32-byte READ/WRITE service actions within the variable-length CDB */
3866 if (cmd[9] == READ_32) {
3867 BUG_ON(SCpnt->cmd_len < 32);
3871 if (cmd[9] == WRITE_32) {
3872 BUG_ON(SCpnt->cmd_len < 32);
3877 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3878 INVALID_FIELD_IN_CDB, 0);
3879 errsts = check_condition_result;
/* default: unsupported opcode */
3883 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3884 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3885 "supported\n", *cmd);
3886 errsts = check_readiness(SCpnt, 1, devip);
3888 break; /* Unit attention takes precedence */
3889 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3890 errsts = check_condition_result;
/* queue the response; delay_override forces immediate completion */
3893 return schedule_resp(SCpnt, devip, done, errsts,
3894 (delay_override ? 0 : scsi_debug_delay));
/* Generate scsi_debug_queuecommand(): a lock-taking wrapper around
 * scsi_debug_queuecommand_lck() for the host template below. */
3897 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
/*
 * Host template describing the simulated HBA's entry points and limits.
 * Passed to scsi_host_alloc() in sdebug_driver_probe(); can_queue is
 * overwritten there with scsi_debug_max_queue before allocation.
 * (Closing brace of the initializer is on a line elided from this excerpt.)
 */
3899 static struct scsi_host_template sdebug_driver_template = {
3900 .show_info = scsi_debug_show_info,
3901 .write_info = scsi_debug_write_info,
3902 .proc_name = sdebug_proc_name,
3903 .name = "SCSI DEBUG",
3904 .info = scsi_debug_info,
3905 .slave_alloc = scsi_debug_slave_alloc,
3906 .slave_configure = scsi_debug_slave_configure,
3907 .slave_destroy = scsi_debug_slave_destroy,
3908 .ioctl = scsi_debug_ioctl,
3909 .queuecommand = scsi_debug_queuecommand,
3910 .eh_abort_handler = scsi_debug_abort,
3911 .eh_bus_reset_handler = scsi_debug_bus_reset,
3912 .eh_device_reset_handler = scsi_debug_device_reset,
3913 .eh_host_reset_handler = scsi_debug_host_reset,
3914 .bios_param = scsi_debug_biosparam,
3915 .can_queue = SCSI_DEBUG_CANQUEUE,
3917 .sg_tablesize = 256,
3919 .max_sectors = 0xffff,
3920 .use_clustering = DISABLE_CLUSTERING,
3921 .module = THIS_MODULE,
/*
 * sdebug_driver_probe - driver-model probe for a simulated adapter device.
 *
 * Allocates a Scsi_Host from sdebug_driver_template, links it to the
 * sdebug_host_info, sizes max_id/max_lun from the module parameters,
 * configures DIF/DIX protection capabilities and the guard type from
 * scsi_debug_dif / scsi_debug_guard, then adds and scans the host.
 *
 * Returns 0 on success, an error code otherwise.
 * NOTE(review): declarations of error/host_prot, several return statements,
 * closing braces and the conditions guarding the "host_prot |= ..." DIX
 * lines (presumably scsi_debug_dix checks) are elided from this excerpt.
 */
4005 static int sdebug_driver_probe(struct device * dev)
4007 struct sdebug_host_info *sdbg_host;
4008 struct Scsi_Host *hpnt;
4010 sdbg_host = to_sdebug_host(dev);
/* honor the run-time queue-depth parameter before allocating the host */
3933 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3934 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3936 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
/* cross-link host info and Scsi_Host via hostdata */
3941 sdbg_host->shost = hpnt;
3942 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
/* leave room past this_id so the initiator's own id is not a target */
3943 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3944 hpnt->max_id = scsi_debug_num_tgts + 1;
3946 hpnt->max_id = scsi_debug_num_tgts;
3947 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
/* translate the scsi_debug_dif parameter into SHOST protection flags */
3951 switch (scsi_debug_dif) {
3953 case SD_DIF_TYPE1_PROTECTION:
3954 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3956 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3959 case SD_DIF_TYPE2_PROTECTION:
3960 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3962 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3965 case SD_DIF_TYPE3_PROTECTION:
3966 host_prot = SHOST_DIF_TYPE3_PROTECTION;
3968 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3973 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3977 scsi_host_set_prot(hpnt, host_prot);
3979 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3980 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3981 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
3982 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
3983 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
3984 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
3985 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
3986 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard == 1 selects IP checksum guard tags, otherwise T10 CRC */
3988 if (scsi_debug_guard == 1)
3989 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
3991 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
3993 error = scsi_add_host(hpnt, &sdbg_host->dev);
3995 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
3997 scsi_host_put(hpnt);
3999 scsi_scan_host(hpnt);
/*
 * sdebug_driver_remove - driver-model remove for a simulated adapter.
 *
 * Removes the Scsi_Host from the mid-layer, frees every per-device info
 * structure hanging off the host, then drops the final host reference.
 * (Return statements and braces are elided from this excerpt.)
 */
4005 static int sdebug_driver_remove(struct device * dev)
4007 struct sdebug_host_info *sdbg_host;
4008 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4010 sdbg_host = to_sdebug_host(dev);
4013 printk(KERN_ERR "%s: Unable to locate host info\n",
4018 scsi_remove_host(sdbg_host->shost);
4020 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4022 list_del(&sdbg_devinfo->dev_list);
4023 kfree(sdbg_devinfo);
4026 scsi_host_put(sdbg_host->shost);
/*
 * pseudo_lld_bus_match - bus ->match callback for the pseudo LLD bus.
 * NOTE(review): the body is entirely elided from this excerpt; it
 * presumably matches every device on the bus to the sdebug driver --
 * confirm against the full file.
 */
4030 static int pseudo_lld_bus_match(struct device *dev,
4031 struct device_driver *dev_driver)
4036 static struct bus_type pseudo_lld_bus = {
4038 .match = pseudo_lld_bus_match,
4039 .probe = sdebug_driver_probe,
4040 .remove = sdebug_driver_remove,
4041 .drv_groups = sdebug_drv_groups,