// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
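/*
 * Example (editorial, illustrative only): loading the module with
 *   modprobe scsi_debug add_host=2 num_tgts=2 max_luns=2 dev_size_mb=256
 * would instead create 2 hosts, each with 2 targets of 2 LUNs (8 devices),
 * each backed by a 256 MiB ram store, rather than the 1 host, 1 target,
 * 1 LUN defaults above.
 */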
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE 1
#define SDEBUG_OPT_MEDIUM_ERR 2
#define SDEBUG_OPT_TIMEOUT 4
#define SDEBUG_OPT_RECOVERED_ERR 8
#define SDEBUG_OPT_TRANSPORT_ERR 16
#define SDEBUG_OPT_DIF_ERR 32
#define SDEBUG_OPT_DIX_ERR 64
#define SDEBUG_OPT_MAC_TIMEOUT 128
#define SDEBUG_OPT_SHORT_TRANSFER 0x100
#define SDEBUG_OPT_Q_NOISE 0x200
#define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
#define SDEBUG_OPT_RARE_TSF 0x800
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_CMD_ABORT 0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8
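/*
 * Editorial note: make_ua() below scans uas_bm with find_first_bit(), so the
 * lowest numbered pending UA (e.g. SDEBUG_UA_POR, 0) is reported ahead of
 * higher numbered ones (e.g. SDEBUG_UA_LUNS_CHANGED, 5), which is what gives
 * the priority ordering described above.
 */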
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
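/*
 * For example, on a 64-bit build (BITS_PER_LONG == 64) this yields
 * SDEBUG_CANQUEUE = 3 * 64 = 192 queued commands per submit queue.
 */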
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN 1 /* Data-in command (e.g. READ) */
#define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
#define F_D_UNKN 8
#define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
#define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
#define F_INV_OP 0x200 /* invalid opcode (not supported) */
#define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
#define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
#define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
#define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
    ZBC_ZTYPE_CNV = 0x1,
    ZBC_ZTYPE_SWR = 0x2,
    ZBC_ZTYPE_SWP = 0x3,
    /* ZBC_ZTYPE_SOBR = 0x4, */
    ZBC_ZTYPE_GAP = 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
    ZBC_NOT_WRITE_POINTER = 0x0,
    ZC1_EMPTY = 0x1,
    ZC2_IMPLICIT_OPEN = 0x2,
    ZC3_EXPLICIT_OPEN = 0x3,
    ZC4_CLOSED = 0x4,
    ZC6_READ_ONLY = 0xd,
    ZC5_FULL = 0xe,
    ZC7_OFFLINE = 0xf,
};

struct sdeb_zone_state { /* ZBC: per zone state */
    enum sdebug_z_type z_type;
    enum sdebug_z_cond z_cond;
    bool z_non_seq_resource;
    unsigned int z_size;
    unsigned int z_csize;
    sector_t z_start;
    sector_t z_wp;
};

enum sdebug_err_type {
    ERR_TMOUT_CMD = 0,        /* make specific scsi command timeout */
    ERR_FAIL_QUEUE_CMD = 1,   /* make specific scsi command's */
                              /* queuecmd return failed */
    ERR_FAIL_CMD = 2,         /* make specific scsi command's */
                              /* queuecmd return succeed but */
                              /* with errors set in scsi_cmnd */
    ERR_ABORT_CMD_FAILED = 3, /* control return FAILED from */
                              /* scsi_debug_abort() */
    ERR_LUN_RESET_FAILED = 4, /* control return FAILED from */
                              /* scsi_debug_device_reset() */
};

struct sdebug_err_inject {
    int type;
    struct list_head list;
    int cnt;
    unsigned char cmd;
    struct rcu_head rcu;

    union {
        /*
         * For ERR_FAIL_QUEUE_CMD
         */
        int queuecmd_ret;

        /*
         * For ERR_FAIL_CMD
         */
        struct {
            unsigned char host_byte;
            unsigned char driver_byte;
            unsigned char status_byte;
            unsigned char sense_key;
            unsigned char asc;
            unsigned char asq;
        };
    };
};
struct sdebug_dev_info {
    struct list_head dev_list;
    unsigned int channel;
    unsigned int target;
    u64 lun;
    uuid_t lu_name;
    struct sdebug_host_info *sdbg_host;
    unsigned long uas_bm[1];
    atomic_t stopped;	/* 1: by SSU, 2: device start */
    bool used;

    /* For ZBC devices */
    enum blk_zoned_model zmodel;
    unsigned int zsize;
    unsigned int zcap;
    unsigned int zsize_shift;
    unsigned int nr_zones;
    unsigned int nr_conv_zones;
    unsigned int nr_seq_zones;
    unsigned int nr_imp_open;
    unsigned int nr_exp_open;
    unsigned int nr_closed;
    unsigned int max_open;
    ktime_t create_ts;	/* time since bootup that this device was created */
    struct sdeb_zone_state *zstate;

    struct dentry *debugfs_entry;
    struct spinlock list_lock;
    struct list_head inject_err_list;
};

struct sdebug_target_info {
    bool reset_fail;
    struct dentry *debugfs_entry;
};

struct sdebug_host_info {
    struct list_head host_list;
    int si_idx;	/* sdeb_store_info (per host) xarray index */
    struct Scsi_Host *shost;
    struct device dev;
    struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
    rwlock_t macc_lck;	/* for atomic media access on this store */
    u8 *storep;		/* user data storage (ram) */
    struct t10_pi_tuple *dif_storep; /* protection info */
    void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d) \
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost) \
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
    struct hrtimer hrt;
    struct execute_work ew;
    ktime_t cmpl_ts;/* time since boot to complete this cmd */
    int issuing_cpu;
    bool aborted;	/* true when blk_abort_request() already called */
    enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
    /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
     * instance indicates this slot is in use.
     */
    struct sdebug_defer sd_dp;
    struct scsi_cmnd *scmd;
};

struct sdebug_scsi_cmd {
    spinlock_t lock;
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
struct opcode_info_t {
    u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
			/* for terminating element */
    u8 opcode;		/* if num_attached > 0, preferred */
    u16 sa;		/* service action */
    u32 flags;		/* OR-ed set of SDEB_F_* */
    int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
    const struct opcode_info_t *arrp;	/* num_attached elements or NULL */
    u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
			/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
    SDEB_I_INVALID_OPCODE = 0,
    SDEB_I_INQUIRY = 1,
    SDEB_I_REPORT_LUNS = 2,
    SDEB_I_REQUEST_SENSE = 3,
    SDEB_I_TEST_UNIT_READY = 4,
    SDEB_I_MODE_SENSE = 5,	/* 6, 10 */
    SDEB_I_MODE_SELECT = 6,	/* 6, 10 */
    SDEB_I_LOG_SENSE = 7,
    SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
    SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
    SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
    SDEB_I_START_STOP = 11,
    SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
    SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
    SDEB_I_MAINT_IN = 14,
    SDEB_I_MAINT_OUT = 15,
    SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
    SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
    SDEB_I_RESERVE = 18,	/* 6, 10 */
    SDEB_I_RELEASE = 19,	/* 6, 10 */
    SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
    SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
    SDEB_I_ATA_PT = 22,		/* 12, 16 */
    SDEB_I_SEND_DIAG = 23,
    SDEB_I_UNMAP = 24,
    SDEB_I_WRITE_BUFFER = 25,
    SDEB_I_WRITE_SAME = 26,	/* 10, 16 */
    SDEB_I_SYNC_CACHE = 27,	/* 10, 16 */
    SDEB_I_COMP_WRITE = 28,
    SDEB_I_PRE_FETCH = 29,	/* 10, 16 */
    SDEB_I_ZONE_OUT = 30,	/* 0x94+SA; includes no data xfer */
    SDEB_I_ZONE_IN = 31,	/* 0x95+SA; all have data-in */
    SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
    SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
        0, 0, 0, 0,
    SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
    0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
        SDEB_I_RELEASE,
    0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
        SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
    0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
    SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
    0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
    0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
    0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
    0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
        SDEB_I_RELEASE,
    0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
    0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
    SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
    0, 0, 0, SDEB_I_VERIFY,
    SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
    SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
    0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
    SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
    SDEB_I_MAINT_OUT, 0, 0, 0,
    SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
        0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
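/*
 * Illustration: a READ(10) cdb starts with opcode 0x28, so
 * opcode_ind_arr[0x28] yields SDEB_I_READ, which then indexes
 * opcode_info_arr[] below; the 6, 12 and 16 byte READ opcodes
 * (0x8, 0xa8, 0x88) all map to that same index.
 */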
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
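/*
 * For example (illustrative): a response function handling a START STOP UNIT
 * command with the IMMED bit set might "return res | SDEG_RES_IMMED_MASK" so
 * that completion is not deferred by the configured jdelay/ndelay.
 */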
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
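/*
 * For example, MODE SENSE(10) (opcode 0x5a) sits in opcode_info_arr[] and its
 * entry points at msense_iarr[] below, where the less common MODE SENSE(6)
 * variant (opcode 0x1a) is found by following the arrp pointer.
 */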
static const struct opcode_info_t msense_iarr[] = {
    {0, 0x1a, 0, F_D_IN, NULL, NULL,
        {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
    {0, 0x15, 0, F_D_OUT, NULL, NULL,
        {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
    {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
        {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
         0, 0, 0, 0} },
    {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
        {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
        {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
         0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
    {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
        NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
               0, 0, 0, 0, 0, 0} },
    {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
        NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
               0, 0, 0} },
    {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
        NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
               0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
    {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
        NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
               0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
    {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
        {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
    {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
        NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
               0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
    {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
        NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
               0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
    {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
        {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
         0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
    {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
        {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
         0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
    {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
        {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
    {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
        {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
    {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
        {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
    {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
        {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
    {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
        {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
    {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
        {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
    {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
        {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
    {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
        {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
    {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
        {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
    {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
        {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
        {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
         0, 0} }, /* REPORT LUNS */
    {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
        {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
        {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
    {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
        resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
            0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
    {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
        resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
            0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
    {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
        {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
         0, 0, 0} },
    {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
        {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
         0, 0} },
    {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
        resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
    {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
        resp_write_dt0, write_iarr, /* WRITE(16) */
        {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
    {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
        {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
        resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
        {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
    {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
        NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
    {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
        resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
        maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
            0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
    {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {ARRAY_SIZE(verify_iarr), 0x8f, 0,
        F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
        verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
    {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
        resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
        {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
         0xff, 0xff} },
    {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
        NULL, reserve_iarr, /* RESERVE(10) <no response function> */
        {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
         0} },
    {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
        NULL, release_iarr, /* RELEASE(10) <no response function> */
        {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
         0} },
/* 20 */
    {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
        {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
        {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
        {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
    {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
        {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
    {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
        {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
         0, 0, 0, 0} }, /* WRITE_BUFFER */
    {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
        resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
        {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
         0, 0, 0, 0, 0} },
    {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
        resp_sync_cache, sync_cache_iarr,
        {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
         0, 0, 0, 0} }, /* SYNC_CACHE (10) */
    {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
        {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
         0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
    {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
        resp_pre_fetch, pre_fetch_iarr,
        {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
         0, 0, 0, 0} }, /* PRE-FETCH (10) */
/* 30 */
    {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
        resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
        {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
    {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
        resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
        {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
         0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
    {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;		/* iouring iopoll interface.*/

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;
static void sdebug_err_free(struct rcu_head *head)
{
    struct sdebug_err_inject *inject =
        container_of(head, typeof(*inject), rcu);

    kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
    struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
    struct sdebug_err_inject *err;

    spin_lock(&devip->list_lock);
    list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
        if (err->type == new->type && err->cmd == new->cmd) {
            list_del_rcu(&err->list);
            call_rcu(&err->rcu, sdebug_err_free);
        }
    }

    list_add_tail_rcu(&new->list, &devip->inject_err_list);
    spin_unlock(&devip->list_lock);
}

static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
    struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
    struct sdebug_err_inject *err;
    int type;
    unsigned char cmd;

    if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
        kfree(buf);
        return -EINVAL;
    }

    spin_lock(&devip->list_lock);
    list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
        if (err->type == type && err->cmd == cmd) {
            list_del_rcu(&err->list);
            call_rcu(&err->rcu, sdebug_err_free);
            spin_unlock(&devip->list_lock);
            kfree(buf);
            return count;
        }
    }
    spin_unlock(&devip->list_lock);

    kfree(buf);
    return -EINVAL;
}
static int sdebug_error_show(struct seq_file *m, void *p)
{
    struct scsi_device *sdev = (struct scsi_device *)m->private;
    struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
    struct sdebug_err_inject *err;

    seq_puts(m, "Type\tCount\tCommand\n");

    rcu_read_lock();
    list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
        switch (err->type) {
        case ERR_TMOUT_CMD:
        case ERR_ABORT_CMD_FAILED:
        case ERR_LUN_RESET_FAILED:
            seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
                       err->cmd);
            break;

        case ERR_FAIL_QUEUE_CMD:
            seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
                       err->cnt, err->cmd, err->queuecmd_ret);
            break;

        case ERR_FAIL_CMD:
            seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                       err->type, err->cnt, err->cmd,
                       err->host_byte, err->driver_byte,
                       err->status_byte, err->sense_key,
                       err->asc, err->asq);
            break;
        }
    }
    rcu_read_unlock();

    return 0;
}

static int sdebug_error_open(struct inode *inode, struct file *file)
{
    return single_open(file, sdebug_error_show, inode->i_private);
}

static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
                                  size_t count, loff_t *ppos)
{
    char *buf;
    unsigned int inject_type;
    struct sdebug_err_inject *inject;
    struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

    buf = kmalloc(count, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;

    if (copy_from_user(buf, ubuf, count)) {
        kfree(buf);
        return -EFAULT;
    }

    if (buf[0] == '-')
        return sdebug_err_remove(sdev, buf, count);

    if (sscanf(buf, "%d", &inject_type) != 1) {
        kfree(buf);
        return -EINVAL;
    }

    inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
    if (!inject) {
        kfree(buf);
        return -ENOMEM;
    }

    switch (inject_type) {
    case ERR_TMOUT_CMD:
    case ERR_ABORT_CMD_FAILED:
    case ERR_LUN_RESET_FAILED:
        if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
                   &inject->cmd) != 3)
            goto out_error;
        break;

    case ERR_FAIL_QUEUE_CMD:
        if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
                   &inject->cmd, &inject->queuecmd_ret) != 4)
            goto out_error;
        break;

    case ERR_FAIL_CMD:
        if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
                   &inject->type, &inject->cnt, &inject->cmd,
                   &inject->host_byte, &inject->driver_byte,
                   &inject->status_byte, &inject->sense_key,
                   &inject->asc, &inject->asq) != 9)
            goto out_error;
        break;

    default:
        goto out_error;
    }

    kfree(buf);
    sdebug_err_add(sdev, inject);

    return count;

out_error:
    kfree(buf);
    kfree(inject);
    return -EINVAL;
}

static const struct file_operations sdebug_error_fops = {
    .open    = sdebug_error_open,
    .read    = seq_read,
    .write   = sdebug_error_write,
    .release = single_release,
};
static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
    struct scsi_target *starget = (struct scsi_target *)m->private;
    struct sdebug_target_info *targetip =
        (struct sdebug_target_info *)starget->hostdata;

    if (targetip)
        seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

    return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
    return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
        const char __user *ubuf, size_t count, loff_t *ppos)
{
    int ret;
    struct scsi_target *starget =
        (struct scsi_target *)file->f_inode->i_private;
    struct sdebug_target_info *targetip =
        (struct sdebug_target_info *)starget->hostdata;

    if (targetip) {
        ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
        return ret < 0 ? ret : count;
    }
    return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
    .open    = sdebug_target_reset_fail_open,
    .read    = seq_read,
    .write   = sdebug_target_reset_fail_write,
    .release = single_release,
};
static int sdebug_target_alloc(struct scsi_target *starget)
{
    struct sdebug_target_info *targetip;
    struct dentry *dentry;

    targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
    if (!targetip)
        return -ENOMEM;

    targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
                sdebug_debugfs_root);
    if (IS_ERR_OR_NULL(targetip->debugfs_entry))
        pr_info("%s: failed to create debugfs directory for target %s\n",
                __func__, dev_name(&starget->dev));

    dentry = debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry,
                starget, &sdebug_target_reset_fail_fops);
    if (IS_ERR_OR_NULL(dentry))
        pr_info("%s: failed to create fail_reset file for target %s\n",
                __func__, dev_name(&starget->dev));

    starget->hostdata = targetip;

    return 0;
}

static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
{
    struct sdebug_target_info *targetip = data;

    debugfs_remove(targetip->debugfs_entry);
    kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
    struct sdebug_target_info *targetip;

    targetip = (struct sdebug_target_info *)starget->hostdata;
    if (targetip) {
        starget->hostdata = NULL;
        async_schedule(sdebug_target_cleanup_async, targetip);
    }
}
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
    return 0 == sdebug_fake_rw &&
           (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
                            unsigned long long lba)
{
    struct sdeb_store_info *lsip = sip;

    lba = do_div(lba, sdebug_store_sectors);
    if (!sip || !sip->storep) {
        WARN_ON_ONCE(true);
        lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
    }
    return lsip->storep + lba * sdebug_sector_size;
}
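/*
 * Editorial note: because lba is reduced modulo sdebug_store_sectors above, a
 * backing store smaller than the advertised capacity (e.g. when virtual_gb is
 * set) simply wraps around, so distinct LBAs may alias the same backing bytes.
 */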
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
                                      sector_t sector)
{
    sector = sector_div(sector, sdebug_store_sectors);

    return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
    struct sdebug_host_info *sdbg_host;
    struct Scsi_Host *hpnt;

    mutex_lock(&sdebug_host_list_mutex);
    list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
        hpnt = sdbg_host->shost;
        if ((hpnt->this_id >= 0) &&
            (sdebug_num_tgts > hpnt->this_id))
            hpnt->max_id = sdebug_num_tgts + 1;
        else
            hpnt->max_id = sdebug_num_tgts;
        /* sdebug_max_luns; */
        hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
    }
    mutex_unlock(&sdebug_host_list_mutex);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
                                 enum sdeb_cmd_data c_d,
                                 int in_byte, int in_bit)
{
    unsigned char *sbuff;
    u8 sks[4];
    int sl, asc;

    sbuff = scp->sense_buffer;
    if (!sbuff) {
        sdev_printk(KERN_ERR, scp->device,
                    "%s: sense_buffer is NULL\n", __func__);
        return;
    }
    asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
    memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
    scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
    memset(sks, 0, sizeof(sks));
    sks[0] = 0x80;
    if (c_d)
        sks[0] |= 0x40;
    if (in_bit >= 0) {
        sks[0] |= 0x8;
        sks[0] |= 0x7 & in_bit;
    }
    put_unaligned_be16(in_byte, sks + 1);
    if (sdebug_dsense) {
        sl = sbuff[7] + 8;
        sbuff[7] = sl;
        sbuff[sl] = 0x2;
        sbuff[sl + 1] = 0x6;
        memcpy(sbuff + sl + 4, sks, 3);
    } else
        memcpy(sbuff + 15, sks, 3);
    if (sdebug_verbose)
        sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
                    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
                    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
    if (!scp->sense_buffer) {
        sdev_printk(KERN_ERR, scp->device,
                    "%s: sense_buffer is NULL\n", __func__);
        return;
    }
    memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

    scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

    if (sdebug_verbose)
        sdev_printk(KERN_INFO, scp->device,
                    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
                    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
    mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
                            void __user *arg)
{
    if (sdebug_verbose) {
        if (0x1261 == cmd)
            sdev_printk(KERN_INFO, dev,
                        "%s: BLKFLSBUF [0x1261]\n", __func__);
        else if (0x5331 == cmd)
            sdev_printk(KERN_INFO, dev,
                        "%s: CDROM_GET_CAPABILITY [0x5331]\n",
                        __func__);
        else
            sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
                        __func__, cmd);
    }
    return -EINVAL;
    /* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
    switch (sdebug_cdb_len) {
    case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
        sdev->use_10_for_rw = false;
        sdev->use_16_for_rw = false;
        sdev->use_10_for_ms = false;
        break;
    case 10:	/* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
        sdev->use_10_for_rw = true;
        sdev->use_16_for_rw = false;
        sdev->use_10_for_ms = false;
        break;
    case 12:	/* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
        sdev->use_10_for_rw = true;
        sdev->use_16_for_rw = false;
        sdev->use_10_for_ms = true;
        break;
    case 16:
        sdev->use_10_for_rw = false;
        sdev->use_16_for_rw = true;
        sdev->use_10_for_ms = true;
        break;
    case 32:	/* No knobs to suggest this so same as 16 for now */
        sdev->use_10_for_rw = false;
        sdev->use_16_for_rw = true;
        sdev->use_10_for_ms = true;
        break;
    default:
        pr_warn("unexpected cdb_len=%d, force to 10\n",
                sdebug_cdb_len);
        sdev->use_10_for_rw = true;
        sdev->use_16_for_rw = false;
        sdev->use_10_for_ms = false;
        sdebug_cdb_len = 10;
        break;
    }
}
static void all_config_cdb_len(void)
{
    struct sdebug_host_info *sdbg_host;
    struct Scsi_Host *shost;
    struct scsi_device *sdev;

    mutex_lock(&sdebug_host_list_mutex);
    list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
        shost = sdbg_host->shost;
        shost_for_each_device(sdev, shost) {
            config_cdb_len(sdev);
        }
    }
    mutex_unlock(&sdebug_host_list_mutex);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
    struct sdebug_host_info *sdhp = devip->sdbg_host;
    struct sdebug_dev_info *dp;

    list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
        if ((devip->sdbg_host == dp->sdbg_host) &&
            (devip->target == dp->target))
            clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
    }
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
    int k;

    k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
    if (k != SDEBUG_NUM_UAS) {
        const char *cp = NULL;

        switch (k) {
        case SDEBUG_UA_POR:
            mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
                            POWER_ON_RESET_ASCQ);
            if (sdebug_verbose)
                cp = "power on reset";
            break;
        case SDEBUG_UA_POOCCUR:
            mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
                            POWER_ON_OCCURRED_ASCQ);
            if (sdebug_verbose)
                cp = "power on occurred";
            break;
        case SDEBUG_UA_BUS_RESET:
            mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
                            BUS_RESET_ASCQ);
            if (sdebug_verbose)
                cp = "bus reset";
            break;
        case SDEBUG_UA_MODE_CHANGED:
            mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
                            MODE_CHANGED_ASCQ);
            if (sdebug_verbose)
                cp = "mode parameters changed";
            break;
        case SDEBUG_UA_CAPACITY_CHANGED:
            mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
                            CAPACITY_CHANGED_ASCQ);
            if (sdebug_verbose)
                cp = "capacity data changed";
            break;
        case SDEBUG_UA_MICROCODE_CHANGED:
            mk_sense_buffer(scp, UNIT_ATTENTION,
                            TARGET_CHANGED_ASC,
                            MICROCODE_CHANGED_ASCQ);
            if (sdebug_verbose)
                cp = "microcode has been changed";
            break;
        case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
            mk_sense_buffer(scp, UNIT_ATTENTION,
                            TARGET_CHANGED_ASC,
                            MICROCODE_CHANGED_WO_RESET_ASCQ);
            if (sdebug_verbose)
                cp = "microcode has been changed without reset";
            break;
        case SDEBUG_UA_LUNS_CHANGED:
            /*
             * SPC-3 behavior is to report a UNIT ATTENTION with
             * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
             * on the target, until a REPORT LUNS command is
             * received. SPC-4 behavior is to report it only once.
             * NOTE: sdebug_scsi_level does not use the same
             * values as struct scsi_device->scsi_level.
             */
            if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
                clear_luns_changed_on_target(devip);
            mk_sense_buffer(scp, UNIT_ATTENTION,
                            TARGET_CHANGED_ASC,
                            LUNS_CHANGED_ASCQ);
            if (sdebug_verbose)
                cp = "reported luns data has changed";
            break;
        default:
            pr_warn("unexpected unit attention code=%d\n", k);
            if (sdebug_verbose)
                cp = "unknown";
            break;
        }
        clear_bit(k, devip->uas_bm);
        if (sdebug_verbose)
            sdev_printk(KERN_INFO, scp->device,
                        "%s reports: Unit attention: %s\n",
                        my_name, cp);
        return check_condition_result;
    }
    return 0;
}
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
                                int arr_len)
{
    int act_len;
    struct scsi_data_buffer *sdb = &scp->sdb;

    if (!sdb->length)
        return 0;
    if (scp->sc_data_direction != DMA_FROM_DEVICE)
        return DID_ERROR << 16;

    act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
                                  arr, arr_len);
    scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

    return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
                                  int arr_len, unsigned int off_dst)
{
    unsigned int act_len, n;
    struct scsi_data_buffer *sdb = &scp->sdb;
    off_t skip = off_dst;

    if (sdb->length <= off_dst)
        return 0;
    if (scp->sc_data_direction != DMA_FROM_DEVICE)
        return DID_ERROR << 16;

    act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
                                   arr, arr_len, skip);
    pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
             __func__, off_dst, scsi_bufflen(scp), act_len,
             scsi_get_resid(scp));
    n = scsi_bufflen(scp) - (off_dst + act_len);
    scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));

    return 0;
}
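/*
 * Worked example (editorial): with scsi_bufflen() == 4096, off_dst == 512 and
 * act_len == 512, n = 4096 - (512 + 512) = 3072; taking the minimum means the
 * resid only ever shrinks as later calls fill more of the data-in buffer.
 */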
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
                               int arr_len)
{
    if (!scsi_bufflen(scp))
        return 0;
    if (scp->sc_data_direction != DMA_TO_DEVICE)
        return -1;

    return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
                          int target_dev_id, int dev_id_num,
                          const char *dev_id_str, int dev_id_str_len,
                          const uuid_t *lu_name)
{
    int num, port_a;
    char b[32];

    port_a = target_dev_id + 1;
    /* T10 vendor identifier field format (faked) */
    arr[0] = 0x2;	/* ASCII */
    arr[1] = 0x1;
    arr[2] = 0x0;
    memcpy(&arr[4], sdebug_inq_vendor_id, 8);
    memcpy(&arr[12], sdebug_inq_product_id, 16);
    memcpy(&arr[28], dev_id_str, dev_id_str_len);
    num = 8 + 16 + dev_id_str_len;
    arr[3] = num;
    num += 4;
    if (dev_id_num >= 0) {
        if (sdebug_uuid_ctl) {
            /* Locally assigned UUID */
            arr[num++] = 0x1;	/* binary (not necessarily sas) */
            arr[num++] = 0xa;	/* PIV=0, lu, naa */
            arr[num++] = 0x0;
            arr[num++] = 0x12;
            arr[num++] = 0x10;	/* uuid type=1, locally assigned */
            arr[num++] = 0x0;
            memcpy(arr + num, lu_name, 16);
            num += 16;
        } else {
            /* NAA-3, Logical unit identifier (binary) */
            arr[num++] = 0x1;	/* binary (not necessarily sas) */
            arr[num++] = 0x3;	/* PIV=0, lu, naa */
            arr[num++] = 0x0;
            arr[num++] = 0x8;
            put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
            num += 8;
        }
        /* Target relative port number */
        arr[num++] = 0x61;	/* proto=sas, binary */
        arr[num++] = 0x94;	/* PIV=1, target port, rel port */
        arr[num++] = 0x0;	/* reserved */
        arr[num++] = 0x4;	/* length */
        arr[num++] = 0x0;	/* reserved */
        arr[num++] = 0x0;	/* reserved */
        arr[num++] = 0x0;
        arr[num++] = 0x1;	/* relative port A */
    }
    /* NAA-3, Target port identifier */
    arr[num++] = 0x61;	/* proto=sas, binary */
    arr[num++] = 0x93;	/* piv=1, target port, naa */
    arr[num++] = 0x0;
    arr[num++] = 0x8;
    put_unaligned_be64(naa3_comp_a + port_a, arr + num);
    num += 8;
    /* NAA-3, Target port group identifier */
    arr[num++] = 0x61;	/* proto=sas, binary */
    arr[num++] = 0x95;	/* piv=1, target port group id */
    arr[num++] = 0x0;
    arr[num++] = 0x4;
    arr[num++] = 0;
    arr[num++] = 0;
    put_unaligned_be16(port_group_id, arr + num);
    num += 2;
    /* NAA-3, Target device identifier */
    arr[num++] = 0x61;	/* proto=sas, binary */
    arr[num++] = 0xa3;	/* piv=1, target device, naa */
    arr[num++] = 0x0;
    arr[num++] = 0x8;
    put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
    num += 8;
    /* SCSI name string: Target device identifier */
    arr[num++] = 0x63;	/* proto=sas, UTF-8 */
    arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
    arr[num++] = 0x0;
    arr[num++] = 24;
    memcpy(arr + num, "naa.32222220", 12);
    num += 12;
    snprintf(b, sizeof(b), "%08X", target_dev_id);
    memcpy(arr + num, b, 8);
    num += 8;
    memset(arr + num, 0, 4);
    num += 4;
    return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
    memcpy(arr, vpd84_data, sizeof(vpd84_data));
    return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
    int num = 0;
    const char *na1 = "https://www.kernel.org/config";
    const char *na2 = "http://www.kernel.org/log";
    int plen, olen;

    arr[num++] = 0x1;	/* lu, storage config */
    arr[num++] = 0x0;	/* reserved */
    arr[num++] = 0x0;
    olen = strlen(na1);
    plen = olen + 1;
    if (plen % 4)
        plen = ((plen / 4) + 1) * 4;
    arr[num++] = plen;	/* length, null terminated, padded */
    memcpy(arr + num, na1, olen);
    memset(arr + num + olen, 0, plen - olen);
    num += plen;

    arr[num++] = 0x4;	/* lu, logging */
    arr[num++] = 0x0;	/* reserved */
    arr[num++] = 0x0;
    olen = strlen(na2);
    plen = olen + 1;
    if (plen % 4)
        plen = ((plen / 4) + 1) * 4;
    arr[num++] = plen;	/* length, null terminated, padded */
    memcpy(arr + num, na2, olen);
    memset(arr + num + olen, 0, plen - olen);
    num += plen;

    return num;
}
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
    int num = 0;
    int port_a, port_b;

    port_a = target_dev_id + 1;
    port_b = port_a + 1;
    arr[num++] = 0x0;	/* reserved */
    arr[num++] = 0x0;	/* reserved */
    arr[num++] = 0x0;
    arr[num++] = 0x1;	/* relative port 1 (primary) */
    memset(arr + num, 0, 6);
    num += 6;
    arr[num++] = 0x0;
    arr[num++] = 12;	/* length tp descriptor */
    /* naa-5 target port identifier (A) */
    arr[num++] = 0x61;	/* proto=sas, binary */
    arr[num++] = 0x93;	/* PIV=1, target port, NAA */
    arr[num++] = 0x0;	/* reserved */
    arr[num++] = 0x8;	/* length */
    put_unaligned_be64(naa3_comp_a + port_a, arr + num);
    num += 8;
    arr[num++] = 0x0;	/* reserved */
    arr[num++] = 0x0;	/* reserved */
    arr[num++] = 0x0;
    arr[num++] = 0x2;	/* relative port 2 (secondary) */
    memset(arr + num, 0, 6);
    num += 6;
    arr[num++] = 0x0;
    arr[num++] = 12;	/* length tp descriptor */
    /* naa-5 target port identifier (B) */
    arr[num++] = 0x61;	/* proto=sas, binary */
    arr[num++] = 0x93;	/* PIV=1, target port, NAA */
    arr[num++] = 0x0;	/* reserved */
    arr[num++] = 0x8;	/* length */
    put_unaligned_be64(naa3_comp_a + port_b, arr + num);
    num += 8;

    return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
    memcpy(arr, vpd89_data, sizeof(vpd89_data));
    return sizeof(vpd89_data);
}
1765 static unsigned char vpdb0_data[] = {
1766 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1767 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1768 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1769 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1772 /* Block limits VPD page (SBC-3) */
1773 static int inquiry_vpd_b0(unsigned char *arr)
1777 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1779 /* Optimal transfer length granularity */
1780 if (sdebug_opt_xferlen_exp != 0 &&
1781 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1782 gran = 1 << sdebug_opt_xferlen_exp;
1784 gran = 1 << sdebug_physblk_exp;
1785 put_unaligned_be16(gran, arr + 2);
1787 /* Maximum Transfer Length */
1788 if (sdebug_store_sectors > 0x400)
1789 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1791 /* Optimal Transfer Length */
1792 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1795 /* Maximum Unmap LBA Count */
1796 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1798 /* Maximum Unmap Block Descriptor Count */
1799 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1802 /* Unmap Granularity Alignment */
1803 if (sdebug_unmap_alignment) {
1804 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1805 arr[28] |= 0x80; /* UGAVALID */
1808 /* Optimal Unmap Granularity */
1809 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1811 /* Maximum WRITE SAME Length */
1812 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1814 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
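/*
 * The page built above can be checked from user space, for example with
 * "sg_vpd --page=bl /dev/sg1" from sg3_utils (the device node will vary),
 * which decodes the transfer length and unmap fields filled in here.
 */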
1817 /* Block device characteristics VPD page (SBC-3) */
1818 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1820 memset(arr, 0, 0x3c);
1822 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1824 arr[3] = 5; /* less than 1.8" */
1825 if (devip->zmodel == BLK_ZONED_HA)
1826 arr[4] = 1 << 4; /* zoned field = 01b */
1831 /* Logical block provisioning VPD page (SBC-4) */
1832 static int inquiry_vpd_b2(unsigned char *arr)
1834 memset(arr, 0, 0x4);
1835 arr[0] = 0; /* threshold exponent */
1842 if (sdebug_lbprz && scsi_debug_lbp())
1843 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1844 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1845 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1846 /* threshold_percentage=0 */
1850 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1851 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1853 memset(arr, 0, 0x3c);
1854 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1856 * Set Optimal number of open sequential write preferred zones and
1857 * Optimal number of non-sequentially written sequential write
1858 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1859 * fields set to zero, apart from Max. number of open swrz_s field.
1861 put_unaligned_be32(0xffffffff, &arr[4]);
1862 put_unaligned_be32(0xffffffff, &arr[8]);
1863 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1864 put_unaligned_be32(devip->max_open, &arr[12]);
1866 put_unaligned_be32(0xffffffff, &arr[12]);
1867 if (devip->zcap < devip->zsize) {
1868 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1869 put_unaligned_be64(devip->zsize, &arr[20]);
1876 #define SDEBUG_LONG_INQ_SZ 96
1877 #define SDEBUG_MAX_INQ_ARR_SZ 584
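/*
 * resp_inquiry() answers both standard INQUIRY and, when the EVPD bit
 * (cmd[1] bit 0) is set, the VPD page named in cmd[2]. The response is
 * assembled in a scratch buffer and trimmed to the ALLOCATION LENGTH in
 * cdb bytes 3..4. A rough user space probe of the serial number page:
 * "sg_inq --page=0x80 /dev/sg1" (sg3_utils; node name will vary).
 */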
1879 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1881 unsigned char pq_pdt;
1883 unsigned char *cmd = scp->cmnd;
1886 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1888 alloc_len = get_unaligned_be16(cmd + 3);
1889 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1891 return DID_REQUEUE << 16;
1892 is_disk = (sdebug_ptype == TYPE_DISK);
1893 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1894 is_disk_zbc = (is_disk || is_zbc);
1895 have_wlun = scsi_is_wlun(scp->device->lun);
1897 pq_pdt = TYPE_WLUN; /* present, wlun */
1898 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1899 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1901 pq_pdt = (sdebug_ptype & 0x1f);
1903 if (0x2 & cmd[1]) { /* CMDDT bit set */
1904 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1906 return check_condition_result;
1907 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1908 int lu_id_num, port_group_id, target_dev_id;
1911 int host_no = devip->sdbg_host->shost->host_no;
1913 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1914 (devip->channel & 0x7f);
1915 if (sdebug_vpd_use_hostno == 0)
1917 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1918 (devip->target * 1000) + devip->lun);
1919 target_dev_id = ((host_no + 1) * 2000) +
1920 (devip->target * 1000) - 3;
1921 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1922 if (0 == cmd[2]) { /* supported vital product data pages */
1923 arr[1] = cmd[2]; /*sanity */
1925 arr[n++] = 0x0; /* this page */
1926 arr[n++] = 0x80; /* unit serial number */
1927 arr[n++] = 0x83; /* device identification */
1928 arr[n++] = 0x84; /* software interface ident. */
1929 arr[n++] = 0x85; /* management network addresses */
1930 arr[n++] = 0x86; /* extended inquiry */
1931 arr[n++] = 0x87; /* mode page policy */
1932 arr[n++] = 0x88; /* SCSI ports */
1933 if (is_disk_zbc) { /* SBC or ZBC */
1934 arr[n++] = 0x89; /* ATA information */
1935 arr[n++] = 0xb0; /* Block limits */
1936 arr[n++] = 0xb1; /* Block characteristics */
1938 arr[n++] = 0xb2; /* LB Provisioning */
1940 arr[n++] = 0xb6; /* ZB dev. char. */
1942 arr[3] = n - 4; /* number of supported VPD pages */
1943 } else if (0x80 == cmd[2]) { /* unit serial number */
1944 arr[1] = cmd[2]; /*sanity */
1946 memcpy(&arr[4], lu_id_str, len);
1947 } else if (0x83 == cmd[2]) { /* device identification */
1948 arr[1] = cmd[2]; /*sanity */
1949 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1950 target_dev_id, lu_id_num,
1953 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1954 arr[1] = cmd[2]; /*sanity */
1955 arr[3] = inquiry_vpd_84(&arr[4]);
1956 } else if (0x85 == cmd[2]) { /* Management network addresses */
1957 arr[1] = cmd[2]; /*sanity */
1958 arr[3] = inquiry_vpd_85(&arr[4]);
1959 } else if (0x86 == cmd[2]) { /* extended inquiry */
1960 arr[1] = cmd[2]; /*sanity */
1961 arr[3] = 0x3c; /* number of following entries */
1962 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1963 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1964 else if (have_dif_prot)
1965 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1967 arr[4] = 0x0; /* no protection stuff */
1968 arr[5] = 0x7; /* head of q, ordered + simple q's */
1969 } else if (0x87 == cmd[2]) { /* mode page policy */
1970 arr[1] = cmd[2]; /*sanity */
1971 arr[3] = 0x8; /* number of following entries */
1972 arr[4] = 0x2; /* disconnect-reconnect mp */
1973 arr[6] = 0x80; /* mlus, shared */
1974 arr[8] = 0x18; /* protocol specific lu */
1975 arr[10] = 0x82; /* mlus, per initiator port */
1976 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1977 arr[1] = cmd[2]; /*sanity */
1978 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1979 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1980 arr[1] = cmd[2]; /*sanity */
1981 n = inquiry_vpd_89(&arr[4]);
1982 put_unaligned_be16(n, arr + 2);
1983 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1984 arr[1] = cmd[2]; /*sanity */
1985 arr[3] = inquiry_vpd_b0(&arr[4]);
1986 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1987 arr[1] = cmd[2]; /*sanity */
1988 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1989 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1990 arr[1] = cmd[2]; /*sanity */
1991 arr[3] = inquiry_vpd_b2(&arr[4]);
1992 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1993 arr[1] = cmd[2]; /*sanity */
1994 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1996 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1998 return check_condition_result;
2000 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2001 ret = fill_from_dev_buffer(scp, arr,
2002 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2006 /* drops through here for a standard inquiry */
2007 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
2008 arr[2] = sdebug_scsi_level;
2009 arr[3] = 2; /* response_data_format==2 */
2010 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2011 arr[5] = (int)have_dif_prot; /* PROTECT bit */
2012 if (sdebug_vpd_use_hostno == 0)
2013 arr[5] |= 0x10; /* claim: implicit TPGS */
2014 arr[6] = 0x10; /* claim: MultiP */
2015 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2016 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2017 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2018 memcpy(&arr[16], sdebug_inq_product_id, 16);
2019 memcpy(&arr[32], sdebug_inq_product_rev, 4);
2020 /* Use Vendor Specific area to place driver date in ASCII */
2021 memcpy(&arr[36], sdebug_version_date, 8);
2022 /* version descriptors (2 bytes each) follow */
2023 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
2024 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
2026 if (is_disk) { /* SBC-4 no version claimed */
2027 put_unaligned_be16(0x600, arr + n);
2029 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
2030 put_unaligned_be16(0x525, arr + n);
2032 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
2033 put_unaligned_be16(0x624, arr + n);
2036 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
2037 ret = fill_from_dev_buffer(scp, arr,
2038 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
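/*
 * REQUEST SENSE below is answered from local state rather than a queued
 * sense buffer: a stopped LU reports NOT READY (in the process of
 * becoming ready, or initializing command required), and when the IE
 * control mode page has TEST=1 with MRIE=6 a THRESHOLD EXCEEDED response
 * is faked. Fixed (0x70) or descriptor (0x72) sense format is chosen by
 * the DESC bit in cmd[1].
 */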
2043 /* See resp_iec_m_pg() for how this data is manipulated */
2044 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2047 static int resp_requests(struct scsi_cmnd *scp,
2048 struct sdebug_dev_info *devip)
2050 unsigned char *cmd = scp->cmnd;
2051 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
2052 bool dsense = !!(cmd[1] & 1);
2053 u32 alloc_len = cmd[4];
2055 int stopped_state = atomic_read(&devip->stopped);
2057 memset(arr, 0, sizeof(arr));
2058 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
2062 arr[2] = LOGICAL_UNIT_NOT_READY;
2063 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2067 arr[2] = NOT_READY; /* NOT_READY in sense_key */
2068 arr[7] = 0xa; /* 18 byte sense buffer */
2069 arr[12] = LOGICAL_UNIT_NOT_READY;
2070 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2072 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2073 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2076 arr[1] = 0x0; /* NO_SENSE in sense_key */
2077 arr[2] = THRESHOLD_EXCEEDED;
2078 arr[3] = 0xff; /* Failure prediction (false) */
2082 arr[2] = 0x0; /* NO_SENSE in sense_key */
2083 arr[7] = 0xa; /* 18 byte sense buffer */
2084 arr[12] = THRESHOLD_EXCEEDED;
2085 arr[13] = 0xff; /* Failure prediction (false) */
2087 } else { /* nothing to report */
2090 memset(arr, 0, len);
2093 memset(arr, 0, len);
2098 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2101 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2103 unsigned char *cmd = scp->cmnd;
2104 int power_cond, want_stop, stopped_state;
2107 power_cond = (cmd[4] & 0xf0) >> 4;
2109 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2110 return check_condition_result;
2112 want_stop = !(cmd[4] & 1);
2113 stopped_state = atomic_read(&devip->stopped);
2114 if (stopped_state == 2) {
2115 ktime_t now_ts = ktime_get_boottime();
2117 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2118 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2120 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2121 /* tur_ms_to_ready timer extinguished */
2122 atomic_set(&devip->stopped, 0);
2126 if (stopped_state == 2) {
2128 stopped_state = 1; /* dummy up success */
2129 } else { /* Disallow tur_ms_to_ready delay to be overridden */
2130 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2131 return check_condition_result;
2135 changing = (stopped_state != want_stop);
2137 atomic_xchg(&devip->stopped, want_stop);
2138 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
2139 return SDEG_RES_IMMED_MASK;
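/*
 * Capacity is recomputed on demand. If the virtual_gb parameter is
 * positive it overrides the store size; e.g. virtual_gb=4 with the
 * default 512 byte sectors yields 4 * (1073741824 / 512) = 8388608
 * sectors, regardless of how much RAM backs the store (accesses then
 * wrap modulo sdebug_store_sectors in do_device_access()).
 */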
2144 static sector_t get_sdebug_capacity(void)
2146 static const unsigned int gibibyte = 1073741824;
2148 if (sdebug_virtual_gb > 0)
2149 return (sector_t)sdebug_virtual_gb *
2150 (gibibyte / sdebug_sector_size);
2152 return sdebug_store_sectors;
2155 #define SDEBUG_READCAP_ARR_SZ 8
2156 static int resp_readcap(struct scsi_cmnd *scp,
2157 struct sdebug_dev_info *devip)
2159 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2162 /* following just in case virtual_gb changed */
2163 sdebug_capacity = get_sdebug_capacity();
2164 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2165 if (sdebug_capacity < 0xffffffff) {
2166 capac = (unsigned int)sdebug_capacity - 1;
2167 put_unaligned_be32(capac, arr + 0);
2169 put_unaligned_be32(0xffffffff, arr + 0);
2170 put_unaligned_be16(sdebug_sector_size, arr + 6);
2171 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2174 #define SDEBUG_READCAP16_ARR_SZ 32
2175 static int resp_readcap16(struct scsi_cmnd *scp,
2176 struct sdebug_dev_info *devip)
2178 unsigned char *cmd = scp->cmnd;
2179 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2182 alloc_len = get_unaligned_be32(cmd + 10);
2183 /* following just in case virtual_gb changed */
2184 sdebug_capacity = get_sdebug_capacity();
2185 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2186 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2187 put_unaligned_be32(sdebug_sector_size, arr + 8);
2188 arr[13] = sdebug_physblk_exp & 0xf;
2189 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2191 if (scsi_debug_lbp()) {
2192 arr[14] |= 0x80; /* LBPME */
2193 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2194 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2195 * in the wider field maps to 0 in this field.
2197 if (sdebug_lbprz & 1) /* precisely what the draft requires */
2202 * Since the scsi_debug READ CAPACITY implementation always reports the
2203 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2205 if (devip->zmodel == BLK_ZONED_HM)
2208 arr[15] = sdebug_lowest_aligned & 0xff;
2210 if (have_dif_prot) {
2211 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2212 arr[12] |= 1; /* PROT_EN */
2215 return fill_from_dev_buffer(scp, arr,
2216 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
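/*
 * Byte 12 of the READ CAPACITY(16) response above encodes protection
 * as P_TYPE = sdebug_dif - 1 in bits 3:1 plus PROT_EN in bit 0; for
 * example, loading the driver with dif=2 reports P_TYPE=1/PROT_EN=1,
 * i.e. Type 2 protection enabled.
 */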
2219 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2221 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2222 struct sdebug_dev_info *devip)
2224 unsigned char *cmd = scp->cmnd;
2226 int host_no = devip->sdbg_host->shost->host_no;
2227 int port_group_a, port_group_b, port_a, port_b;
2231 alen = get_unaligned_be32(cmd + 6);
2232 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2234 return DID_REQUEUE << 16;
2236 * EVPD page 0x88 states we have two ports, one
2237 * real and a fake port with no device connected.
2238 * So we create two port groups with one port each
2239 * and set the group with port B to unavailable.
2241 port_a = 0x1; /* relative port A */
2242 port_b = 0x2; /* relative port B */
2243 port_group_a = (((host_no + 1) & 0x7f) << 8) +
2244 (devip->channel & 0x7f);
2245 port_group_b = (((host_no + 1) & 0x7f) << 8) +
2246 (devip->channel & 0x7f) + 0x80;
2249 * The asymmetric access state is cycled according to the host_id.
2252 if (sdebug_vpd_use_hostno == 0) {
2253 arr[n++] = host_no % 3; /* Asymm access state */
2254 arr[n++] = 0x0F; /* claim: all states are supported */
2256 arr[n++] = 0x0; /* Active/Optimized path */
2257 arr[n++] = 0x01; /* only support active/optimized paths */
2259 put_unaligned_be16(port_group_a, arr + n);
2261 arr[n++] = 0; /* Reserved */
2262 arr[n++] = 0; /* Status code */
2263 arr[n++] = 0; /* Vendor unique */
2264 arr[n++] = 0x1; /* One port per group */
2265 arr[n++] = 0; /* Reserved */
2266 arr[n++] = 0; /* Reserved */
2267 put_unaligned_be16(port_a, arr + n);
2269 arr[n++] = 3; /* Port unavailable */
2270 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2271 put_unaligned_be16(port_group_b, arr + n);
2273 arr[n++] = 0; /* Reserved */
2274 arr[n++] = 0; /* Status code */
2275 arr[n++] = 0; /* Vendor unique */
2276 arr[n++] = 0x1; /* One port per group */
2277 arr[n++] = 0; /* Reserved */
2278 arr[n++] = 0; /* Reserved */
2279 put_unaligned_be16(port_b, arr + n);
2283 put_unaligned_be32(rlen, arr + 0);
2286 * Return the smallest of:
2287 * - the allocated length
2288 * - the constructed response length
2289 * - the maximum array size
2291 rlen = min(alen, n);
2292 ret = fill_from_dev_buffer(scp, arr,
2293 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
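/*
 * REPORT SUPPORTED OPERATION CODES below honours four REPORTING OPTIONS
 * values: 0 lists every supported opcode (8 bytes per descriptor, or 20
 * with RCTD set, which appends a command timeouts descriptor), while 1,
 * 2 and 3 describe a single opcode, returning SUPPORT=1 (not supported)
 * or SUPPORT=3 (supported per this standard) in byte 1.
 */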
2298 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2299 struct sdebug_dev_info *devip)
2302 u8 reporting_opts, req_opcode, sdeb_i, supp;
2304 u32 alloc_len, a_len;
2305 int k, offset, len, errsts, count, bump, na;
2306 const struct opcode_info_t *oip;
2307 const struct opcode_info_t *r_oip;
2309 u8 *cmd = scp->cmnd;
2311 rctd = !!(cmd[2] & 0x80);
2312 reporting_opts = cmd[2] & 0x7;
2313 req_opcode = cmd[3];
2314 req_sa = get_unaligned_be16(cmd + 4);
2315 alloc_len = get_unaligned_be32(cmd + 6);
2316 if (alloc_len < 4 || alloc_len > 0xffff) {
2317 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2318 return check_condition_result;
2320 if (alloc_len > 8192)
2324 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2326 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2328 return check_condition_result;
2330 switch (reporting_opts) {
2331 case 0: /* all commands */
2332 /* count number of commands */
2333 for (count = 0, oip = opcode_info_arr;
2334 oip->num_attached != 0xff; ++oip) {
2335 if (F_INV_OP & oip->flags)
2337 count += (oip->num_attached + 1);
2339 bump = rctd ? 20 : 8;
2340 put_unaligned_be32(count * bump, arr);
2341 for (offset = 4, oip = opcode_info_arr;
2342 oip->num_attached != 0xff && offset < a_len; ++oip) {
2343 if (F_INV_OP & oip->flags)
2345 na = oip->num_attached;
2346 arr[offset] = oip->opcode;
2347 put_unaligned_be16(oip->sa, arr + offset + 2);
2349 arr[offset + 5] |= 0x2;
2350 if (FF_SA & oip->flags)
2351 arr[offset + 5] |= 0x1;
2352 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2354 put_unaligned_be16(0xa, arr + offset + 8);
2356 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2357 if (F_INV_OP & oip->flags)
2360 arr[offset] = oip->opcode;
2361 put_unaligned_be16(oip->sa, arr + offset + 2);
2363 arr[offset + 5] |= 0x2;
2364 if (FF_SA & oip->flags)
2365 arr[offset + 5] |= 0x1;
2366 put_unaligned_be16(oip->len_mask[0],
2369 put_unaligned_be16(0xa,
2376 case 1: /* one command: opcode only */
2377 case 2: /* one command: opcode plus service action */
2378 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2379 sdeb_i = opcode_ind_arr[req_opcode];
2380 oip = &opcode_info_arr[sdeb_i];
2381 if (F_INV_OP & oip->flags) {
2385 if (1 == reporting_opts) {
2386 if (FF_SA & oip->flags) {
2387 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2390 return check_condition_result;
2393 } else if (2 == reporting_opts &&
2394 0 == (FF_SA & oip->flags)) {
2395 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2396 kfree(arr); /* point at requested sa */
2397 return check_condition_result;
2399 if (0 == (FF_SA & oip->flags) &&
2400 req_opcode == oip->opcode)
2402 else if (0 == (FF_SA & oip->flags)) {
2403 na = oip->num_attached;
2404 for (k = 0, oip = oip->arrp; k < na;
2406 if (req_opcode == oip->opcode)
2409 supp = (k >= na) ? 1 : 3;
2410 } else if (req_sa != oip->sa) {
2411 na = oip->num_attached;
2412 for (k = 0, oip = oip->arrp; k < na;
2414 if (req_sa == oip->sa)
2417 supp = (k >= na) ? 1 : 3;
2421 u = oip->len_mask[0];
2422 put_unaligned_be16(u, arr + 2);
2423 arr[4] = oip->opcode;
2424 for (k = 1; k < u; ++k)
2425 arr[4 + k] = (k < 16) ?
2426 oip->len_mask[k] : 0xff;
2431 arr[1] = (rctd ? 0x80 : 0) | supp;
2433 put_unaligned_be16(0xa, arr + offset);
2438 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2440 return check_condition_result;
2442 offset = (offset < a_len) ? offset : a_len;
2443 len = (offset < alloc_len) ? offset : alloc_len;
2444 errsts = fill_from_dev_buffer(scp, arr, len);
2449 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2450 struct sdebug_dev_info *devip)
2455 u8 *cmd = scp->cmnd;
2457 memset(arr, 0, sizeof(arr));
2458 repd = !!(cmd[2] & 0x80);
2459 alloc_len = get_unaligned_be32(cmd + 6);
2460 if (alloc_len < 4) {
2461 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2462 return check_condition_result;
2464 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2465 arr[1] = 0x1; /* ITNRS */
2472 len = (len < alloc_len) ? len : alloc_len;
2473 return fill_from_dev_buffer(scp, arr, len);
2476 /* <<Following mode page info copied from ST318451LW>> */
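/*
 * Each mode page helper takes a PC (page control) value: 0 = current,
 * 1 = changeable (a mask of writable bits), 2 = default. PC=3 (saved)
 * is rejected by resp_mode_sense() with SAVING PARAMETERS NOT SUPPORTED
 * before any helper runs, so the helpers only special-case 1 and 2.
 */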
2478 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2479 { /* Read-Write Error Recovery page for mode_sense */
2480 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2483 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2485 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2486 return sizeof(err_recov_pg);
2489 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2490 { /* Disconnect-Reconnect page for mode_sense */
2491 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2492 0, 0, 0, 0, 0, 0, 0, 0};
2494 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2496 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2497 return sizeof(disconnect_pg);
2500 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2501 { /* Format device page for mode_sense */
2502 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2503 0, 0, 0, 0, 0, 0, 0, 0,
2504 0, 0, 0, 0, 0x40, 0, 0, 0};
2506 memcpy(p, format_pg, sizeof(format_pg));
2507 put_unaligned_be16(sdebug_sectors_per, p + 10);
2508 put_unaligned_be16(sdebug_sector_size, p + 12);
2509 if (sdebug_removable)
2510 p[20] |= 0x20; /* should agree with INQUIRY */
2512 memset(p + 2, 0, sizeof(format_pg) - 2);
2513 return sizeof(format_pg);
2516 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2517 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2520 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2521 { /* Caching page for mode_sense */
2522 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2523 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2524 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2525 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2527 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2528 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2529 memcpy(p, caching_pg, sizeof(caching_pg));
2531 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2532 else if (2 == pcontrol)
2533 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2534 return sizeof(caching_pg);
2537 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2540 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2541 { /* Control mode page for mode_sense */
2542 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2544 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2548 ctrl_m_pg[2] |= 0x4;
2550 ctrl_m_pg[2] &= ~0x4;
2553 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2555 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2557 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2558 else if (2 == pcontrol)
2559 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2560 return sizeof(ctrl_m_pg);
2564 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2565 { /* Informational Exceptions control mode page for mode_sense */
2566 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2568 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2571 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2573 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2574 else if (2 == pcontrol)
2575 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2576 return sizeof(iec_m_pg);
2579 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2580 { /* SAS SSP mode page - short format for mode_sense */
2581 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2582 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2584 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2586 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2587 return sizeof(sas_sf_m_pg);
2591 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2593 { /* SAS phy control and discover mode page for mode_sense */
2594 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2595 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2596 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2597 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2598 0x2, 0, 0, 0, 0, 0, 0, 0,
2599 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2600 0, 0, 0, 0, 0, 0, 0, 0,
2601 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2602 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2603 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2604 0x3, 0, 0, 0, 0, 0, 0, 0,
2605 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2606 0, 0, 0, 0, 0, 0, 0, 0,
2610 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2611 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2612 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2613 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2614 port_a = target_dev_id + 1;
2615 port_b = port_a + 1;
2616 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2617 put_unaligned_be32(port_a, p + 20);
2618 put_unaligned_be32(port_b, p + 48 + 20);
2620 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2621 return sizeof(sas_pcd_m_pg);
2624 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2625 { /* SAS SSP shared protocol specific port mode subpage */
2626 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2627 0, 0, 0, 0, 0, 0, 0, 0,
2630 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2632 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2633 return sizeof(sas_sha_m_pg);
2636 #define SDEBUG_MAX_MSENSE_SZ 256
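/*
 * resp_mode_sense() lays out the response as: mode parameter header
 * (4 bytes for MODE SENSE(6), 8 for MODE SENSE(10)), an optional block
 * descriptor (8 bytes, or 16 when LLBAA is set in the 10-byte cdb),
 * then the requested page(s). Page 0x3f concatenates all supported
 * pages, and subpage 0xff additionally pulls in the SAS subpages.
 */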
2638 static int resp_mode_sense(struct scsi_cmnd *scp,
2639 struct sdebug_dev_info *devip)
2641 int pcontrol, pcode, subpcode, bd_len;
2642 unsigned char dev_spec;
2643 u32 alloc_len, offset, len;
2645 int target = scp->device->id;
2647 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2648 unsigned char *cmd = scp->cmnd;
2649 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2651 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2652 pcontrol = (cmd[2] & 0xc0) >> 6;
2653 pcode = cmd[2] & 0x3f;
2655 msense_6 = (MODE_SENSE == cmd[0]);
2656 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2657 is_disk = (sdebug_ptype == TYPE_DISK);
2658 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2659 if ((is_disk || is_zbc) && !dbd)
2660 bd_len = llbaa ? 16 : 8;
2663 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2664 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2665 if (0x3 == pcontrol) { /* Saving values not supported */
2666 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2667 return check_condition_result;
2669 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2670 (devip->target * 1000) - 3;
2671 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2672 if (is_disk || is_zbc) {
2673 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2685 arr[4] = 0x1; /* set LONGLBA bit */
2686 arr[7] = bd_len; /* assume 255 or less */
2690 if ((bd_len > 0) && (!sdebug_capacity))
2691 sdebug_capacity = get_sdebug_capacity();
2694 if (sdebug_capacity > 0xfffffffe)
2695 put_unaligned_be32(0xffffffff, ap + 0);
2697 put_unaligned_be32(sdebug_capacity, ap + 0);
2698 put_unaligned_be16(sdebug_sector_size, ap + 6);
2701 } else if (16 == bd_len) {
2702 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2703 put_unaligned_be32(sdebug_sector_size, ap + 12);
2708 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2709 /* TODO: Control Extension page */
2710 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2711 return check_condition_result;
2716 case 0x1: /* Read-Write error recovery page, direct access */
2717 len = resp_err_recov_pg(ap, pcontrol, target);
2720 case 0x2: /* Disconnect-Reconnect page, all devices */
2721 len = resp_disconnect_pg(ap, pcontrol, target);
2724 case 0x3: /* Format device page, direct access */
2726 len = resp_format_pg(ap, pcontrol, target);
2731 case 0x8: /* Caching page, direct access */
2732 if (is_disk || is_zbc) {
2733 len = resp_caching_pg(ap, pcontrol, target);
2738 case 0xa: /* Control Mode page, all devices */
2739 len = resp_ctrl_m_pg(ap, pcontrol, target);
2742 case 0x19: /* if spc==1 then sas phy, control+discover */
2743 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2744 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2745 return check_condition_result;
2748 if ((0x0 == subpcode) || (0xff == subpcode))
2749 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2750 if ((0x1 == subpcode) || (0xff == subpcode))
2751 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2753 if ((0x2 == subpcode) || (0xff == subpcode))
2754 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2757 case 0x1c: /* Informational Exceptions Mode page, all devices */
2758 len = resp_iec_m_pg(ap, pcontrol, target);
2761 case 0x3f: /* Read all Mode pages */
2762 if ((0 == subpcode) || (0xff == subpcode)) {
2763 len = resp_err_recov_pg(ap, pcontrol, target);
2764 len += resp_disconnect_pg(ap + len, pcontrol, target);
2766 len += resp_format_pg(ap + len, pcontrol,
2768 len += resp_caching_pg(ap + len, pcontrol,
2770 } else if (is_zbc) {
2771 len += resp_caching_pg(ap + len, pcontrol,
2774 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2775 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2776 if (0xff == subpcode) {
2777 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2778 target, target_dev_id);
2779 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2781 len += resp_iec_m_pg(ap + len, pcontrol, target);
2784 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2785 return check_condition_result;
2793 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2794 return check_condition_result;
2797 arr[0] = offset - 1;
2799 put_unaligned_be16((offset - 2), arr + 0);
2800 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2803 #define SDEBUG_MAX_MSELECT_SZ 512
2805 static int resp_mode_select(struct scsi_cmnd *scp,
2806 struct sdebug_dev_info *devip)
2808 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2809 int param_len, res, mpage;
2810 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2811 unsigned char *cmd = scp->cmnd;
2812 int mselect6 = (MODE_SELECT == cmd[0]);
2814 memset(arr, 0, sizeof(arr));
2817 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2818 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2819 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2820 return check_condition_result;
2822 res = fetch_to_dev_buffer(scp, arr, param_len);
2824 return DID_ERROR << 16;
2825 else if (sdebug_verbose && (res < param_len))
2826 sdev_printk(KERN_INFO, scp->device,
2827 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2828 __func__, param_len, res);
2829 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2830 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2831 off = bd_len + (mselect6 ? 4 : 8);
2832 if (md_len > 2 || off >= res) {
2833 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2834 return check_condition_result;
2836 mpage = arr[off] & 0x3f;
2837 ps = !!(arr[off] & 0x80);
2839 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2840 return check_condition_result;
2842 spf = !!(arr[off] & 0x40);
2843 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2845 if ((pg_len + off) > param_len) {
2846 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2847 PARAMETER_LIST_LENGTH_ERR, 0);
2848 return check_condition_result;
2851 case 0x8: /* Caching Mode page */
2852 if (caching_pg[1] == arr[off + 1]) {
2853 memcpy(caching_pg + 2, arr + off + 2,
2854 sizeof(caching_pg) - 2);
2855 goto set_mode_changed_ua;
2858 case 0xa: /* Control Mode page */
2859 if (ctrl_m_pg[1] == arr[off + 1]) {
2860 memcpy(ctrl_m_pg + 2, arr + off + 2,
2861 sizeof(ctrl_m_pg) - 2);
2862 if (ctrl_m_pg[4] & 0x8)
2866 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2867 goto set_mode_changed_ua;
2870 case 0x1c: /* Informational Exceptions Mode page */
2871 if (iec_m_pg[1] == arr[off + 1]) {
2872 memcpy(iec_m_pg + 2, arr + off + 2,
2873 sizeof(iec_m_pg) - 2);
2874 goto set_mode_changed_ua;
2880 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2881 return check_condition_result;
2882 set_mode_changed_ua:
2883 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
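/*
 * A successful MODE SELECT on the caching, control or IE pages above
 * copies the payload into the corresponding static page (those arrays
 * are driver global, shared by every simulated device) and raises a
 * MODE PARAMETERS CHANGED unit attention on the issuing device.
 */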
2887 static int resp_temp_l_pg(unsigned char *arr)
2889 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2890 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2893 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2894 return sizeof(temp_l_pg);
2897 static int resp_ie_l_pg(unsigned char *arr)
2899 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2902 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2903 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2904 arr[4] = THRESHOLD_EXCEEDED;
2907 return sizeof(ie_l_pg);
2910 static int resp_env_rep_l_spg(unsigned char *arr)
2912 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2913 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2914 0x1, 0x0, 0x23, 0x8,
2915 0x0, 55, 72, 35, 55, 45, 0, 0,
2918 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2919 return sizeof(env_rep_l_spg);
2922 #define SDEBUG_MAX_LSENSE_SZ 512
2924 static int resp_log_sense(struct scsi_cmnd *scp,
2925 struct sdebug_dev_info *devip)
2927 int ppc, sp, pcode, subpcode;
2928 u32 alloc_len, len, n;
2929 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2930 unsigned char *cmd = scp->cmnd;
2932 memset(arr, 0, sizeof(arr));
2936 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2937 return check_condition_result;
2939 pcode = cmd[2] & 0x3f;
2940 subpcode = cmd[3] & 0xff;
2941 alloc_len = get_unaligned_be16(cmd + 7);
2943 if (0 == subpcode) {
2945 case 0x0: /* Supported log pages log page */
2947 arr[n++] = 0x0; /* this page */
2948 arr[n++] = 0xd; /* Temperature */
2949 arr[n++] = 0x2f; /* Informational exceptions */
2952 case 0xd: /* Temperature log page */
2953 arr[3] = resp_temp_l_pg(arr + 4);
2955 case 0x2f: /* Informational exceptions log page */
2956 arr[3] = resp_ie_l_pg(arr + 4);
2959 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2960 return check_condition_result;
2962 } else if (0xff == subpcode) {
2966 case 0x0: /* Supported log pages and subpages log page */
2969 arr[n++] = 0x0; /* 0,0 page */
2971 arr[n++] = 0xff; /* this page */
2973 arr[n++] = 0x0; /* Temperature */
2975 arr[n++] = 0x1; /* Environment reporting */
2977 arr[n++] = 0xff; /* all 0xd subpages */
2979 arr[n++] = 0x0; /* Informational exceptions */
2981 arr[n++] = 0xff; /* all 0x2f subpages */
2984 case 0xd: /* Temperature subpages */
2987 arr[n++] = 0x0; /* Temperature */
2989 arr[n++] = 0x1; /* Environment reporting */
2991 arr[n++] = 0xff; /* these subpages */
2994 case 0x2f: /* Informational exceptions subpages */
2997 arr[n++] = 0x0; /* Informational exceptions */
2999 arr[n++] = 0xff; /* these subpages */
3003 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3004 return check_condition_result;
3006 } else if (subpcode > 0) {
3009 if (pcode == 0xd && subpcode == 1)
3010 arr[3] = resp_env_rep_l_spg(arr + 4);
3012 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3013 return check_condition_result;
3016 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3017 return check_condition_result;
3019 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3020 return fill_from_dev_buffer(scp, arr,
3021 min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
3024 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3026 return devip->nr_zones != 0;
3029 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3030 unsigned long long lba)
3032 u32 zno = lba >> devip->zsize_shift;
3033 struct sdeb_zone_state *zsp;
3035 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3036 return &devip->zstate[zno];
3039 * If the zone capacity is less than the zone size, adjust for gap zones.
3042 zno = 2 * zno - devip->nr_conv_zones;
3043 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
3044 zsp = &devip->zstate[zno];
3045 if (lba >= zsp->z_start + zsp->z_size)
3047 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3051 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3053 return zsp->z_type == ZBC_ZTYPE_CNV;
3056 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3058 return zsp->z_type == ZBC_ZTYPE_GAP;
3061 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3063 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
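/*
 * Zone conditions follow the ZBC state machine: EMPTY, IMPLICIT OPEN,
 * EXPLICIT OPEN, CLOSED and FULL. The nr_imp_open/nr_exp_open counters
 * let max_open be enforced: when opening one more zone would exceed the
 * limit, an implicitly open zone is closed first.
 */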
3066 static void zbc_close_zone(struct sdebug_dev_info *devip,
3067 struct sdeb_zone_state *zsp)
3069 enum sdebug_z_cond zc;
3071 if (!zbc_zone_is_seq(zsp))
3075 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3078 if (zc == ZC2_IMPLICIT_OPEN)
3079 devip->nr_imp_open--;
3081 devip->nr_exp_open--;
3083 if (zsp->z_wp == zsp->z_start) {
3084 zsp->z_cond = ZC1_EMPTY;
3086 zsp->z_cond = ZC4_CLOSED;
3091 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3093 struct sdeb_zone_state *zsp = &devip->zstate[0];
3096 for (i = 0; i < devip->nr_zones; i++, zsp++) {
3097 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3098 zbc_close_zone(devip, zsp);
3104 static void zbc_open_zone(struct sdebug_dev_info *devip,
3105 struct sdeb_zone_state *zsp, bool explicit)
3107 enum sdebug_z_cond zc;
3109 if (!zbc_zone_is_seq(zsp))
3113 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3114 (!explicit && zc == ZC2_IMPLICIT_OPEN))
3117 /* Close an implicit open zone if necessary */
3118 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3119 zbc_close_zone(devip, zsp);
3120 else if (devip->max_open &&
3121 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3122 zbc_close_imp_open_zone(devip);
3124 if (zsp->z_cond == ZC4_CLOSED)
3127 zsp->z_cond = ZC3_EXPLICIT_OPEN;
3128 devip->nr_exp_open++;
3130 zsp->z_cond = ZC2_IMPLICIT_OPEN;
3131 devip->nr_imp_open++;
3135 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3136 struct sdeb_zone_state *zsp)
3138 switch (zsp->z_cond) {
3139 case ZC2_IMPLICIT_OPEN:
3140 devip->nr_imp_open--;
3142 case ZC3_EXPLICIT_OPEN:
3143 devip->nr_exp_open--;
3146 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3147 zsp->z_start, zsp->z_cond);
3150 zsp->z_cond = ZC5_FULL;
3153 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3154 unsigned long long lba, unsigned int num)
3156 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3157 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3159 if (!zbc_zone_is_seq(zsp))
3162 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3164 if (zsp->z_wp >= zend)
3165 zbc_set_zone_full(devip, zsp);
3170 if (lba != zsp->z_wp)
3171 zsp->z_non_seq_resource = true;
3177 } else if (end > zsp->z_wp) {
3183 if (zsp->z_wp >= zend)
3184 zbc_set_zone_full(devip, zsp);
3190 zend = zsp->z_start + zsp->z_size;
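/*
 * For sequential-write-required zones the checks below mirror ZBC:
 * writes may not cross a zone boundary, are refused in FULL zones and
 * must land exactly on the write pointer. For instance, with z_wp at
 * 0x80100 (values here are only illustrative), a write to LBA 0x80100
 * succeeds and advances the WP while one to 0x80180 fails with an
 * UNALIGNED WRITE COMMAND sense.
 */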
3195 static int check_zbc_access_params(struct scsi_cmnd *scp,
3196 unsigned long long lba, unsigned int num, bool write)
3198 struct scsi_device *sdp = scp->device;
3199 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3200 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3201 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3204 if (devip->zmodel == BLK_ZONED_HA)
3206 /* For host-managed, reads cannot cross zone type boundaries */
3207 if (zsp->z_type != zsp_end->z_type) {
3208 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3211 return check_condition_result;
3216 /* Writing into a gap zone is not allowed */
3217 if (zbc_zone_is_gap(zsp)) {
3218 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3219 ATTEMPT_ACCESS_GAP);
3220 return check_condition_result;
3223 /* No restrictions for writes within conventional zones */
3224 if (zbc_zone_is_conv(zsp)) {
3225 if (!zbc_zone_is_conv(zsp_end)) {
3226 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3228 WRITE_BOUNDARY_ASCQ);
3229 return check_condition_result;
3234 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3235 /* Writes cannot cross sequential zone boundaries */
3236 if (zsp_end != zsp) {
3237 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3239 WRITE_BOUNDARY_ASCQ);
3240 return check_condition_result;
3242 /* Cannot write full zones */
3243 if (zsp->z_cond == ZC5_FULL) {
3244 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3245 INVALID_FIELD_IN_CDB, 0);
3246 return check_condition_result;
3248 /* Writes must be aligned to the zone WP */
3249 if (lba != zsp->z_wp) {
3250 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3252 UNALIGNED_WRITE_ASCQ);
3253 return check_condition_result;
3257 /* Handle implicit open of closed and empty zones */
3258 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3259 if (devip->max_open &&
3260 devip->nr_exp_open >= devip->max_open) {
3261 mk_sense_buffer(scp, DATA_PROTECT,
3264 return check_condition_result;
3266 zbc_open_zone(devip, zsp, false);
3272 static inline int check_device_access_params
3273 (struct scsi_cmnd *scp, unsigned long long lba,
3274 unsigned int num, bool write)
3276 struct scsi_device *sdp = scp->device;
3277 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3279 if (lba + num > sdebug_capacity) {
3280 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3281 return check_condition_result;
3283 /* transfer length excessive (tie in to block limits VPD page) */
3284 if (num > sdebug_store_sectors) {
3285 /* needs work to find which cdb byte 'num' comes from */
3286 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3287 return check_condition_result;
3289 if (write && unlikely(sdebug_wp)) {
3290 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3291 return check_condition_result;
3293 if (sdebug_dev_is_zoned(devip))
3294 return check_zbc_access_params(scp, lba, num, write);
3300 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3301 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3302 * that access any of the "stores" in struct sdeb_store_info should call this
3303 * function with bug_if_fake_rw set to true.
3305 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3306 bool bug_if_fake_rw)
3308 if (sdebug_fake_rw) {
3309 BUG_ON(bug_if_fake_rw); /* See note above */
3312 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3315 /* Returns number of bytes copied or -1 if error. */
3316 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3317 u32 sg_skip, u64 lba, u32 num, bool do_write)
3320 u64 block, rest = 0;
3321 enum dma_data_direction dir;
3322 struct scsi_data_buffer *sdb = &scp->sdb;
3326 dir = DMA_TO_DEVICE;
3327 write_since_sync = true;
3329 dir = DMA_FROM_DEVICE;
3332 if (!sdb->length || !sip)
3334 if (scp->sc_data_direction != dir)
3338 block = do_div(lba, sdebug_store_sectors);
3339 if (block + num > sdebug_store_sectors)
3340 rest = block + num - sdebug_store_sectors;
3342 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3343 fsp + (block * sdebug_sector_size),
3344 (num - rest) * sdebug_sector_size, sg_skip, do_write);
3345 if (ret != (num - rest) * sdebug_sector_size)
3349 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3350 fsp, rest * sdebug_sector_size,
3351 sg_skip + ((num - rest) * sdebug_sector_size),
3358 /* Returns number of bytes copied or -1 if error. */
3359 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3361 struct scsi_data_buffer *sdb = &scp->sdb;
3365 if (scp->sc_data_direction != DMA_TO_DEVICE)
3367 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3368 num * sdebug_sector_size, 0, true);
3371 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3372 * arr into sip->storep+lba and return true. If the comparison fails, return false. */
3374 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3375 const u8 *arr, bool compare_only)
3378 u64 block, rest = 0;
3379 u32 store_blks = sdebug_store_sectors;
3380 u32 lb_size = sdebug_sector_size;
3381 u8 *fsp = sip->storep;
3383 block = do_div(lba, store_blks);
3384 if (block + num > store_blks)
3385 rest = block + num - store_blks;
3387 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3391 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3397 arr += num * lb_size;
3398 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3400 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3404 static __be16 dif_compute_csum(const void *buf, int len)
3409 csum = (__force __be16)ip_compute_csum(buf, len);
3411 csum = cpu_to_be16(crc_t10dif(buf, len));
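/*
 * dif_verify() applies the T10 PI checks in order: the guard tag
 * (IP checksum or CRC16-T10DIF over the sector, chosen by the guard
 * parameter), then the reference tag, which for Type 1 must equal the
 * low 32 bits of the LBA and for Type 2 must match the expected
 * initial reference tag from the cdb (ei_lba). Type 3 leaves the
 * reference tag unchecked.
 */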
3416 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3417 sector_t sector, u32 ei_lba)
3419 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3421 if (sdt->guard_tag != csum) {
3422 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3423 (unsigned long)sector,
3424 be16_to_cpu(sdt->guard_tag),
3428 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3429 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3430 pr_err("REF check failed on sector %lu\n",
3431 (unsigned long)sector);
3434 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3435 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3436 pr_err("REF check failed on sector %lu\n",
3437 (unsigned long)sector);
3443 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3444 unsigned int sectors, bool read)
3448 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3449 scp->device->hostdata, true);
3450 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3451 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3452 struct sg_mapping_iter miter;
3454 /* Bytes of protection data to copy into sgl */
3455 resid = sectors * sizeof(*dif_storep);
3457 sg_miter_start(&miter, scsi_prot_sglist(scp),
3458 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3459 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3461 while (sg_miter_next(&miter) && resid > 0) {
3462 size_t len = min_t(size_t, miter.length, resid);
3463 void *start = dif_store(sip, sector);
3466 if (dif_store_end < start + len)
3467 rest = start + len - dif_store_end;
3472 memcpy(paddr, start, len - rest);
3474 memcpy(start, paddr, len - rest);
3478 memcpy(paddr + len - rest, dif_storep, rest);
3480 memcpy(dif_storep, paddr + len - rest, rest);
3483 sector += len / sizeof(*dif_storep);
3486 sg_miter_stop(&miter);
3489 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3490 unsigned int sectors, u32 ei_lba)
3495 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3496 scp->device->hostdata, true);
3497 struct t10_pi_tuple *sdt;
3499 for (i = 0; i < sectors; i++, ei_lba++) {
3500 sector = start_sec + i;
3501 sdt = dif_store(sip, sector);
3503 if (sdt->app_tag == cpu_to_be16(0xffff))
3507 * Because scsi_debug acts as both initiator and
3508 * target we proceed to verify the PI even if
3509 * RDPROTECT=3. This is done so the "initiator" knows
3510 * which type of error to return. Otherwise we would
3511 * have to iterate over the PI twice.
3513 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3514 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3523 dif_copy_prot(scp, start_sec, sectors, true);
3530 sdeb_read_lock(struct sdeb_store_info *sip)
3532 if (sdebug_no_rwlock) {
3534 __acquire(&sip->macc_lck);
3536 __acquire(&sdeb_fake_rw_lck);
3539 read_lock(&sip->macc_lck);
3541 read_lock(&sdeb_fake_rw_lck);
3546 sdeb_read_unlock(struct sdeb_store_info *sip)
3548 if (sdebug_no_rwlock) {
3550 __release(&sip->macc_lck);
3552 __release(&sdeb_fake_rw_lck);
3555 read_unlock(&sip->macc_lck);
3557 read_unlock(&sdeb_fake_rw_lck);
3562 sdeb_write_lock(struct sdeb_store_info *sip)
3564 if (sdebug_no_rwlock) {
3566 __acquire(&sip->macc_lck);
3568 __acquire(&sdeb_fake_rw_lck);
3571 write_lock(&sip->macc_lck);
3573 write_lock(&sdeb_fake_rw_lck);
3578 sdeb_write_unlock(struct sdeb_store_info *sip)
3580 if (sdebug_no_rwlock) {
3582 __release(&sip->macc_lck);
3584 __release(&sdeb_fake_rw_lck);
3587 write_unlock(&sip->macc_lck);
3589 write_unlock(&sdeb_fake_rw_lck);
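/*
 * The helpers above take the per-store rwlock, or the single
 * sdeb_fake_rw_lck when there is no backing store. With no_rwlock=1
 * they shrink to sparse annotations (__acquire/__release) only, on the
 * assumption that callers avoid overlapping accesses to the same store.
 */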
3593 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3600 struct sdeb_store_info *sip = devip2sip(devip, true);
3601 u8 *cmd = scp->cmnd;
3606 lba = get_unaligned_be64(cmd + 2);
3607 num = get_unaligned_be32(cmd + 10);
3612 lba = get_unaligned_be32(cmd + 2);
3613 num = get_unaligned_be16(cmd + 7);
3618 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3619 (u32)(cmd[1] & 0x1f) << 16;
3620 num = (0 == cmd[4]) ? 256 : cmd[4];
3625 lba = get_unaligned_be32(cmd + 2);
3626 num = get_unaligned_be32(cmd + 6);
3629 case XDWRITEREAD_10:
3631 lba = get_unaligned_be32(cmd + 2);
3632 num = get_unaligned_be16(cmd + 7);
3635 default: /* assume READ(32) */
3636 lba = get_unaligned_be64(cmd + 12);
3637 ei_lba = get_unaligned_be32(cmd + 20);
3638 num = get_unaligned_be32(cmd + 28);
3642 if (unlikely(have_dif_prot && check_prot)) {
3643 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3645 mk_sense_invalid_opcode(scp);
3646 return check_condition_result;
3648 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3649 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3650 (cmd[1] & 0xe0) == 0)
3651 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3654 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3655 atomic_read(&sdeb_inject_pending))) {
3657 atomic_set(&sdeb_inject_pending, 0);
3660 ret = check_device_access_params(scp, lba, num, false);
3663 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3664 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3665 ((lba + num) > sdebug_medium_error_start))) {
3666 /* claim unrecoverable read error */
3667 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3668 /* set info field and valid bit for fixed descriptor */
3669 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3670 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3671 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3672 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3673 put_unaligned_be32(ret, scp->sense_buffer + 3);
3675 scsi_set_resid(scp, scsi_bufflen(scp));
3676 return check_condition_result;
3679 sdeb_read_lock(sip);
3682 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3683 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3684 case 1: /* Guard tag error */
3685 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3686 sdeb_read_unlock(sip);
3687 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3688 return check_condition_result;
3689 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3690 sdeb_read_unlock(sip);
3691 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3692 return illegal_condition_result;
3695 case 3: /* Reference tag error */
3696 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3697 sdeb_read_unlock(sip);
3698 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3699 return check_condition_result;
3700 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3701 sdeb_read_unlock(sip);
3702 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3703 return illegal_condition_result;
3709 ret = do_device_access(sip, scp, 0, lba, num, false);
3710 sdeb_read_unlock(sip);
3711 if (unlikely(ret == -1))
3712 return DID_ERROR << 16;
3714 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3716 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3717 atomic_read(&sdeb_inject_pending))) {
3718 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3719 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3720 atomic_set(&sdeb_inject_pending, 0);
3721 return check_condition_result;
3722 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3723 /* Logical block guard check failed */
3724 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3725 atomic_set(&sdeb_inject_pending, 0);
3726 return illegal_condition_result;
3727 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3728 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3729 atomic_set(&sdeb_inject_pending, 0);
3730 return illegal_condition_result;
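/*
 * The SDEBUG_OPT_RECOV_DIF_DIX path above fires once per arming of
 * sdeb_inject_pending and downgrades an otherwise successful read to a
 * RECOVERED ERROR, a DIF guard failure (ABORTED COMMAND) or a DIX guard
 * failure (ILLEGAL REQUEST), which allows host side PI error handling
 * to be exercised without real media problems.
 */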
3736 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3737 unsigned int sectors, u32 ei_lba)
3740 struct t10_pi_tuple *sdt;
3742 sector_t sector = start_sec;
3745 struct sg_mapping_iter diter;
3746 struct sg_mapping_iter piter;
3748 BUG_ON(scsi_sg_count(SCpnt) == 0);
3749 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3751 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3752 scsi_prot_sg_count(SCpnt),
3753 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3754 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3755 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3757 /* For each protection page */
3758 while (sg_miter_next(&piter)) {
3760 if (WARN_ON(!sg_miter_next(&diter))) {
3765 for (ppage_offset = 0; ppage_offset < piter.length;
3766 ppage_offset += sizeof(struct t10_pi_tuple)) {
3767 /* If we're at the end of the current
3768 * data page, advance to the next one */
3770 if (dpage_offset >= diter.length) {
3771 if (WARN_ON(!sg_miter_next(&diter))) {
3778 sdt = piter.addr + ppage_offset;
3779 daddr = diter.addr + dpage_offset;
3781 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3782 ret = dif_verify(sdt, daddr, sector, ei_lba);
3789 dpage_offset += sdebug_sector_size;
3791 diter.consumed = dpage_offset;
3792 sg_miter_stop(&diter);
3794 sg_miter_stop(&piter);
3796 dif_copy_prot(SCpnt, start_sec, sectors, false);
3803 sg_miter_stop(&diter);
3804 sg_miter_stop(&piter);
3808 static unsigned long lba_to_map_index(sector_t lba)
3810 if (sdebug_unmap_alignment)
3811 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3812 sector_div(lba, sdebug_unmap_granularity);
3816 static sector_t map_index_to_lba(unsigned long index)
3818 sector_t lba = index * sdebug_unmap_granularity;
3820 if (sdebug_unmap_alignment)
3821 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
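/*
 * The provisioning map keeps one bit per unmap granularity chunk,
 * biased by the unmap alignment. For example, with unmap_granularity=8
 * and unmap_alignment=4: lba_to_map_index(4) = (4 + 8 - 4) / 8 = 1 and
 * map_index_to_lba(1) = 1 * 8 - (8 - 4) = 4, so chunk boundaries fall
 * at LBAs 4, 12, 20, ...
 */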
3825 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3829 unsigned int mapped;
3830 unsigned long index;
3833 index = lba_to_map_index(lba);
3834 mapped = test_bit(index, sip->map_storep);
3837 next = find_next_zero_bit(sip->map_storep, map_size, index);
3839 next = find_next_bit(sip->map_storep, map_size, index);
3841 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3846 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3849 sector_t end = lba + len;
3852 unsigned long index = lba_to_map_index(lba);
3854 if (index < map_size)
3855 set_bit(index, sip->map_storep);
3857 lba = map_index_to_lba(index + 1);
3861 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3864 sector_t end = lba + len;
3865 u8 *fsp = sip->storep;
3868 unsigned long index = lba_to_map_index(lba);
3870 if (lba == map_index_to_lba(index) &&
3871 lba + sdebug_unmap_granularity <= end &&
3873 clear_bit(index, sip->map_storep);
3874 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3875 memset(fsp + lba * sdebug_sector_size,
3876 (sdebug_lbprz & 1) ? 0 : 0xff,
3877 sdebug_sector_size *
3878 sdebug_unmap_granularity);
3880 if (sip->dif_storep) {
3881 memset(sip->dif_storep + lba, 0xff,
3882 sizeof(*sip->dif_storep) *
3883 sdebug_unmap_granularity);
3886 lba = map_index_to_lba(index + 1);
3890 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3897 struct sdeb_store_info *sip = devip2sip(devip, true);
3898 u8 *cmd = scp->cmnd;
3903 lba = get_unaligned_be64(cmd + 2);
3904 num = get_unaligned_be32(cmd + 10);
3909 lba = get_unaligned_be32(cmd + 2);
3910 num = get_unaligned_be16(cmd + 7);
3915 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3916 (u32)(cmd[1] & 0x1f) << 16;
3917 num = (0 == cmd[4]) ? 256 : cmd[4];
3922 lba = get_unaligned_be32(cmd + 2);
3923 num = get_unaligned_be32(cmd + 6);
3926 case 0x53: /* XDWRITEREAD(10) */
3928 lba = get_unaligned_be32(cmd + 2);
3929 num = get_unaligned_be16(cmd + 7);
3932 default: /* assume WRITE(32) */
3933 lba = get_unaligned_be64(cmd + 12);
3934 ei_lba = get_unaligned_be32(cmd + 20);
3935 num = get_unaligned_be32(cmd + 28);
3939 if (unlikely(have_dif_prot && check_prot)) {
3940 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3942 mk_sense_invalid_opcode(scp);
3943 return check_condition_result;
3945 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3946 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3947 (cmd[1] & 0xe0) == 0)
3948 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3952 sdeb_write_lock(sip);
3953 ret = check_device_access_params(scp, lba, num, true);
3955 sdeb_write_unlock(sip);
3960 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3961 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3962 case 1: /* Guard tag error */
3963 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3964 sdeb_write_unlock(sip);
3965 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3966 return illegal_condition_result;
3967 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3968 sdeb_write_unlock(sip);
3969 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3970 return check_condition_result;
3973 case 3: /* Reference tag error */
3974 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3975 sdeb_write_unlock(sip);
3976 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3977 return illegal_condition_result;
3978 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3979 sdeb_write_unlock(sip);
3980 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3981 return check_condition_result;
3987 ret = do_device_access(sip, scp, 0, lba, num, true);
3988 if (unlikely(scsi_debug_lbp()))
3989 map_region(sip, lba, num);
3990 /* If ZBC zone then bump its write pointer */
3991 if (sdebug_dev_is_zoned(devip))
3992 zbc_inc_wp(devip, lba, num);
3993 sdeb_write_unlock(sip);
3994 if (unlikely(-1 == ret))
3995 return DID_ERROR << 16;
3996 else if (unlikely(sdebug_verbose &&
3997 (ret < (num * sdebug_sector_size))))
3998 sdev_printk(KERN_INFO, scp->device,
3999 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4000 my_name, num * sdebug_sector_size, ret);
4002 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4003 atomic_read(&sdeb_inject_pending))) {
4004 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4005 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4006 atomic_set(&sdeb_inject_pending, 0);
4007 return check_condition_result;
4008 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4009 /* Logical block guard check failed */
4010 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4011 atomic_set(&sdeb_inject_pending, 0);
4012 return illegal_condition_result;
4013 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4014 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4015 atomic_set(&sdeb_inject_pending, 0);
4016 return illegal_condition_result;
4023 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4024 * No READ GATHERED yet (it would require bidi or a long cdb holding the gather list).
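* A sketch of the parameter data this handler parses (derived from the
* code below, not a normative SBC restatement): a 32 byte header, then
* num_lrd LBA range descriptors of 32 bytes each, where bytes 0-7 hold
* the LBA (big endian), bytes 8-11 the number of logical blocks and,
* for the 32 byte cdb variant, bytes 12-15 the expected initial logical
* block reference tag. The write data itself starts at byte
* lbdof * block_size of the data-out buffer.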
4026 static int resp_write_scat(struct scsi_cmnd *scp,
4027 struct sdebug_dev_info *devip)
4029 u8 *cmd = scp->cmnd;
4032 struct sdeb_store_info *sip = devip2sip(devip, true);
4034 u16 lbdof, num_lrd, k;
4035 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4036 u32 lb_size = sdebug_sector_size;
4041 static const u32 lrd_size = 32; /* LBA range descriptor size; also the parameter list header size */
4043 if (cmd[0] == VARIABLE_LENGTH_CMD) {
4045 wrprotect = (cmd[10] >> 5) & 0x7;
4046 lbdof = get_unaligned_be16(cmd + 12);
4047 num_lrd = get_unaligned_be16(cmd + 16);
4048 bt_len = get_unaligned_be32(cmd + 28);
4049 } else { /* that leaves WRITE SCATTERED(16) */
4051 wrprotect = (cmd[2] >> 5) & 0x7;
4052 lbdof = get_unaligned_be16(cmd + 4);
4053 num_lrd = get_unaligned_be16(cmd + 8);
4054 bt_len = get_unaligned_be32(cmd + 10);
4055 if (unlikely(have_dif_prot)) {
4056 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4058 mk_sense_invalid_opcode(scp);
4059 return illegal_condition_result;
4061 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4062 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4064 sdev_printk(KERN_ERR, scp->device,
4065 "Unprotected WR to DIF device\n");
4068 if ((num_lrd == 0) || (bt_len == 0))
4069 return 0; /* T10 says these do-nothings are not errors */
4072 sdev_printk(KERN_INFO, scp->device,
4073 "%s: %s: LB Data Offset field bad\n",
4075 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4076 return illegal_condition_result;
4078 lbdof_blen = lbdof * lb_size;
4079 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4081 sdev_printk(KERN_INFO, scp->device,
4082 "%s: %s: LBA range descriptors don't fit\n",
4084 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4085 return illegal_condition_result;
4087 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4089 return SCSI_MLQUEUE_HOST_BUSY;
4091 sdev_printk(KERN_INFO, scp->device,
4092 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4093 my_name, __func__, lbdof_blen);
4094 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4096 ret = DID_ERROR << 16;
4100 sdeb_write_lock(sip);
4101 sg_off = lbdof_blen;
4102 /* Spec says the Buffer Transfer Length field counts the LBs in the data-out buffer */
4104 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4105 lba = get_unaligned_be64(up + 0);
4106 num = get_unaligned_be32(up + 8);
4108 sdev_printk(KERN_INFO, scp->device,
4109 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
4110 my_name, __func__, k, lba, num, sg_off);
4113 ret = check_device_access_params(scp, lba, num, true);
4115 goto err_out_unlock;
4116 num_by = num * lb_size;
4117 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4119 if ((cum_lb + num) > bt_len) {
4121 sdev_printk(KERN_INFO, scp->device,
4122 "%s: %s: sum of blocks > data provided\n",
4124 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4126 ret = illegal_condition_result;
4127 goto err_out_unlock;
4131 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4132 int prot_ret = prot_verify_write(scp, lba, num,
4136 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4138 ret = illegal_condition_result;
4139 goto err_out_unlock;
4143 ret = do_device_access(sip, scp, sg_off, lba, num, true);
4144 /* If ZBC zone then bump its write pointer */
4145 if (sdebug_dev_is_zoned(devip))
4146 zbc_inc_wp(devip, lba, num);
4147 if (unlikely(scsi_debug_lbp()))
4148 map_region(sip, lba, num);
4149 if (unlikely(-1 == ret)) {
4150 ret = DID_ERROR << 16;
4151 goto err_out_unlock;
4152 } else if (unlikely(sdebug_verbose && (ret < num_by)))
4153 sdev_printk(KERN_INFO, scp->device,
4154 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4155 my_name, num_by, ret);
4157 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4158 atomic_read(&sdeb_inject_pending))) {
4159 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4160 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4161 atomic_set(&sdeb_inject_pending, 0);
4162 ret = check_condition_result;
4163 goto err_out_unlock;
4164 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4165 /* Logical block guard check failed */
4166 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4167 atomic_set(&sdeb_inject_pending, 0);
4168 ret = illegal_condition_result;
4169 goto err_out_unlock;
4170 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4171 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4172 atomic_set(&sdeb_inject_pending, 0);
4173 ret = illegal_condition_result;
4174 goto err_out_unlock;
4182 sdeb_write_unlock(sip);
4188 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4189 u32 ei_lba, bool unmap, bool ndob)
4191 struct scsi_device *sdp = scp->device;
4192 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4193 unsigned long long i;
4195 u32 lb_size = sdebug_sector_size;
4197 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4198 scp->device->hostdata, true);
4202 sdeb_write_lock(sip);
4204 ret = check_device_access_params(scp, lba, num, true);
4206 sdeb_write_unlock(sip);
4210 if (unmap && scsi_debug_lbp()) {
4211 unmap_region(sip, lba, num);
4215 block = do_div(lbaa, sdebug_store_sectors);
4216 /* if ndob then zero 1 logical block, else fetch 1 logical block */
4218 fs1p = fsp + (block * lb_size);
4220 memset(fs1p, 0, lb_size);
4223 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4226 sdeb_write_unlock(sip);
4227 return DID_ERROR << 16;
4228 } else if (sdebug_verbose && !ndob && (ret < lb_size))
4229 sdev_printk(KERN_INFO, scp->device,
4230 "%s: %s: lb size=%u, IO sent=%d bytes\n",
4231 my_name, "write same", lb_size, ret);
4233 /* Copy first sector to remaining blocks */
4234 for (i = 1 ; i < num ; i++) {
4236 block = do_div(lbaa, sdebug_store_sectors);
4237 memmove(fsp + (block * lb_size), fs1p, lb_size);
4239 if (scsi_debug_lbp())
4240 map_region(sip, lba, num);
4241 /* If ZBC zone then bump its write pointer */
4242 if (sdebug_dev_is_zoned(devip))
4243 zbc_inc_wp(devip, lba, num);
4245 sdeb_write_unlock(sip);
4250 static int resp_write_same_10(struct scsi_cmnd *scp,
4251 struct sdebug_dev_info *devip)
4253 u8 *cmd = scp->cmnd;
4260 if (sdebug_lbpws10 == 0) {
4261 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4262 return check_condition_result;
4266 lba = get_unaligned_be32(cmd + 2);
4267 num = get_unaligned_be16(cmd + 7);
4268 if (num > sdebug_write_same_length) {
4269 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4270 return check_condition_result;
4272 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4275 static int resp_write_same_16(struct scsi_cmnd *scp,
4276 struct sdebug_dev_info *devip)
4278 u8 *cmd = scp->cmnd;
4285 if (cmd[1] & 0x8) { /* UNMAP */
4286 if (sdebug_lbpws == 0) {
4287 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4288 return check_condition_result;
4292 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
4294 lba = get_unaligned_be64(cmd + 2);
4295 num = get_unaligned_be32(cmd + 10);
4296 if (num > sdebug_write_same_length) {
4297 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4298 return check_condition_result;
4300 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
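/*
 * WRITE SAME(16) cdb fields as decoded above (opcode 0x93 per SBC):
 * byte 1 bit 3 is UNMAP, byte 1 bit 0 is NDOB, bytes 2-9 carry the
 * starting LBA and bytes 10-13 the number of blocks, both big endian.
 * For example (a hypothetical cdb), 93 08 00 00 00 00 00 00 00 10
 * 00 00 00 08 00 00 asks to unmap 8 blocks starting at LBA 16.
 */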
4303 /* Note the mode field is in the same position as the (lower) service action
4304 * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4305 * each mode of this command should be reported separately; left for the future. */
4306 static int resp_write_buffer(struct scsi_cmnd *scp,
4307 struct sdebug_dev_info *devip)
4309 u8 *cmd = scp->cmnd;
4310 struct scsi_device *sdp = scp->device;
4311 struct sdebug_dev_info *dp;
4314 mode = cmd[1] & 0x1f;
4316 case 0x4: /* download microcode (MC) and activate (ACT) */
4317 /* set UAs on this device only */
4318 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4319 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4321 case 0x5: /* download MC, save and ACT */
4322 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4324 case 0x6: /* download MC with offsets and ACT */
4325 /* set UAs on most devices (LUs) in this target */
4326 list_for_each_entry(dp,
4327 &devip->sdbg_host->dev_info_list,
4329 if (dp->target == sdp->id) {
4330 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4332 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4336 case 0x7: /* download MC with offsets, save, and ACT */
4337 /* set UA on all devices (LUs) in this target */
4338 list_for_each_entry(dp,
4339 &devip->sdbg_host->dev_info_list,
4341 if (dp->target == sdp->id)
4342 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4346 /* do nothing for the other mode values of this command */
4352 static int resp_comp_write(struct scsi_cmnd *scp,
4353 struct sdebug_dev_info *devip)
4355 u8 *cmd = scp->cmnd;
4357 struct sdeb_store_info *sip = devip2sip(devip, true);
4360 u32 lb_size = sdebug_sector_size;
4365 lba = get_unaligned_be64(cmd + 2);
4366 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4368 return 0; /* degenerate case, not an error */
4369 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4371 mk_sense_invalid_opcode(scp);
4372 return check_condition_result;
4374 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4375 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4376 (cmd[1] & 0xe0) == 0)
4377 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4379 ret = check_device_access_params(scp, lba, num, false);
4383 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4385 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4387 return check_condition_result;
4390 sdeb_write_lock(sip);
4392 ret = do_dout_fetch(scp, dnum, arr);
4394 retval = DID_ERROR << 16;
4396 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4397 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4398 "indicated=%u, IO sent=%d bytes\n", my_name,
4399 dnum * lb_size, ret);
4400 if (!comp_write_worker(sip, lba, num, arr, false)) {
4401 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4402 retval = check_condition_result;
4405 if (scsi_debug_lbp())
4406 map_region(sip, lba, num);
4408 sdeb_write_unlock(sip);
4413 struct unmap_block_desc {
4419 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4422 struct unmap_block_desc *desc;
4423 struct sdeb_store_info *sip = devip2sip(devip, true);
4424 unsigned int i, payload_len, descriptors;
4427 if (!scsi_debug_lbp())
4428 return 0; /* fib and say it's done */
4429 payload_len = get_unaligned_be16(scp->cmnd + 7);
4430 BUG_ON(scsi_bufflen(scp) != payload_len);
4432 descriptors = (payload_len - 8) / 16;
4433 if (descriptors > sdebug_unmap_max_desc) {
4434 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4435 return check_condition_result;
4438 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4440 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4442 return check_condition_result;
4445 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4447 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4448 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4450 desc = (void *)&buf[8];
4452 sdeb_write_lock(sip);
4454 for (i = 0 ; i < descriptors ; i++) {
4455 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4456 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4458 ret = check_device_access_params(scp, lba, num, true);
4462 unmap_region(sip, lba, num);
4468 sdeb_write_unlock(sip);
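/*
 * Shape of the UNMAP parameter list consumed above (as the BUG_ON()s
 * check): bytes 0-1 hold the unmap data length (payload_len - 2),
 * bytes 2-3 the block descriptor data length (descriptors * 16),
 * bytes 4-7 are reserved, and 16 byte descriptors follow from byte 8,
 * each with an 8 byte LBA, a 4 byte block count and 4 reserved bytes.
 */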
4474 #define SDEBUG_GET_LBA_STATUS_LEN 32
4476 static int resp_get_lba_status(struct scsi_cmnd *scp,
4477 struct sdebug_dev_info *devip)
4479 u8 *cmd = scp->cmnd;
4481 u32 alloc_len, mapped, num;
4483 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4485 lba = get_unaligned_be64(cmd + 2);
4486 alloc_len = get_unaligned_be32(cmd + 10);
4491 ret = check_device_access_params(scp, lba, 1, false);
4495 if (scsi_debug_lbp()) {
4496 struct sdeb_store_info *sip = devip2sip(devip, true);
4498 mapped = map_state(sip, lba, &num);
4501 /* following just in case virtual_gb changed */
4502 sdebug_capacity = get_sdebug_capacity();
4503 if (sdebug_capacity - lba <= 0xffffffff)
4504 num = sdebug_capacity - lba;
4509 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4510 put_unaligned_be32(20, arr); /* Parameter Data Length */
4511 put_unaligned_be64(lba, arr + 8); /* LBA */
4512 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4513 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4515 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4518 static int resp_sync_cache(struct scsi_cmnd *scp,
4519 struct sdebug_dev_info *devip)
4524 u8 *cmd = scp->cmnd;
4526 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4527 lba = get_unaligned_be32(cmd + 2);
4528 num_blocks = get_unaligned_be16(cmd + 7);
4529 } else { /* SYNCHRONIZE_CACHE(16) */
4530 lba = get_unaligned_be64(cmd + 2);
4531 num_blocks = get_unaligned_be32(cmd + 10);
4533 if (lba + num_blocks > sdebug_capacity) {
4534 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4535 return check_condition_result;
4537 if (!write_since_sync || (cmd[1] & 0x2))
4538 res = SDEG_RES_IMMED_MASK;
4539 else /* delay if write_since_sync and IMMED clear */
4540 write_since_sync = false;
4545 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4546 * CONDITION MET if the specified blocks will/have fitted in the cache, and
4547 * a GOOD status otherwise. We model a disk with a big cache, so
4548 * CONDITION MET is always yielded. As a side effect, this actually tries to
4549 * bring the backing range of main memory into the cache associated with the CPU(s).
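* For reference, in SAM status terms GOOD is 0x00 and CONDITION MET is
* 0x04, which is how callers can tell the two outcomes apart.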
4551 static int resp_pre_fetch(struct scsi_cmnd *scp,
4552 struct sdebug_dev_info *devip)
4556 u64 block, rest = 0;
4558 u8 *cmd = scp->cmnd;
4559 struct sdeb_store_info *sip = devip2sip(devip, true);
4560 u8 *fsp = sip->storep;
4562 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4563 lba = get_unaligned_be32(cmd + 2);
4564 nblks = get_unaligned_be16(cmd + 7);
4565 } else { /* PRE-FETCH(16) */
4566 lba = get_unaligned_be64(cmd + 2);
4567 nblks = get_unaligned_be32(cmd + 10);
4569 if (lba + nblks > sdebug_capacity) {
4570 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4571 return check_condition_result;
4575 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4576 block = do_div(lba, sdebug_store_sectors);
4577 if (block + nblks > sdebug_store_sectors)
4578 rest = block + nblks - sdebug_store_sectors;
4580 /* Try to bring the PRE-FETCH range into CPU's cache */
4581 sdeb_read_lock(sip);
4582 prefetch_range(fsp + (sdebug_sector_size * block),
4583 (nblks - rest) * sdebug_sector_size);
4585 prefetch_range(fsp, rest * sdebug_sector_size);
4586 sdeb_read_unlock(sip);
4589 res = SDEG_RES_IMMED_MASK;
4590 return res | condition_met_result;
4593 #define RL_BUCKET_ELEMS 8
4595 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4596 * (W-LUN), the normal Linux scanning logic does not associate it with a
4597 * device (e.g. /dev/sg7). The following magic will make that association:
4598 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4599 * where <n> is a host number. If there are multiple targets in a host then
4600 * the above will associate a W-LUN with each target. To get a W-LUN only
4601 * for target 2, use "echo '- 2 49409' > scan" .
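* As an aside, 49409 decimal is 0xc101, the SAM well-known LU address of
* the REPORT LUNS W-LUN (SCSI_W_LUN_REPORT_LUNS); the scan interface
* expects that address in decimal form.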
4603 static int resp_report_luns(struct scsi_cmnd *scp,
4604 struct sdebug_dev_info *devip)
4606 unsigned char *cmd = scp->cmnd;
4607 unsigned int alloc_len;
4608 unsigned char select_report;
4610 struct scsi_lun *lun_p;
4611 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4612 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4613 unsigned int wlun_cnt; /* report luns W-LUN count */
4614 unsigned int tlun_cnt; /* total LUN count */
4615 unsigned int rlen; /* response length (in bytes) */
4617 unsigned int off_rsp = 0;
4618 const int sz_lun = sizeof(struct scsi_lun);
4620 clear_luns_changed_on_target(devip);
4622 select_report = cmd[2];
4623 alloc_len = get_unaligned_be32(cmd + 6);
4625 if (alloc_len < 4) {
4626 pr_err("alloc len too small %d\n", alloc_len);
4627 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4628 return check_condition_result;
4631 switch (select_report) {
4632 case 0: /* all LUNs apart from W-LUNs */
4633 lun_cnt = sdebug_max_luns;
4636 case 1: /* only W-LUNs */
4640 case 2: /* all LUNs */
4641 lun_cnt = sdebug_max_luns;
4644 case 0x10: /* only administrative LUs */
4645 case 0x11: /* see SPC-5 */
4646 case 0x12: /* only subsidiary LUs owned by referenced LU */
4648 pr_debug("select report invalid %d\n", select_report);
4649 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4650 return check_condition_result;
4653 if (sdebug_no_lun_0 && (lun_cnt > 0))
4656 tlun_cnt = lun_cnt + wlun_cnt;
4657 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4658 scsi_set_resid(scp, scsi_bufflen(scp));
4659 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4660 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4662 /* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
4663 lun = sdebug_no_lun_0 ? 1 : 0;
4664 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4665 memset(arr, 0, sizeof(arr));
4666 lun_p = (struct scsi_lun *)&arr[0];
4668 put_unaligned_be32(rlen, &arr[0]);
4672 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4673 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4675 int_to_scsilun(lun++, lun_p);
4676 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4677 lun_p->scsi_lun[0] |= 0x40;
4679 if (j < RL_BUCKET_ELEMS)
4682 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4688 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4692 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4696 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4698 bool is_bytchk3 = false;
4701 u32 vnum, a_num, off;
4702 const u32 lb_size = sdebug_sector_size;
4705 u8 *cmd = scp->cmnd;
4706 struct sdeb_store_info *sip = devip2sip(devip, true);
4708 bytchk = (cmd[1] >> 1) & 0x3;
4710 return 0; /* always claim internal verify okay */
4711 } else if (bytchk == 2) {
4712 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4713 return check_condition_result;
4714 } else if (bytchk == 3) {
4715 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4719 lba = get_unaligned_be64(cmd + 2);
4720 vnum = get_unaligned_be32(cmd + 10);
4722 case VERIFY: /* is VERIFY(10) */
4723 lba = get_unaligned_be32(cmd + 2);
4724 vnum = get_unaligned_be16(cmd + 7);
4727 mk_sense_invalid_opcode(scp);
4728 return check_condition_result;
4731 return 0; /* not an error */
4732 a_num = is_bytchk3 ? 1 : vnum;
4733 /* Treat following check like one for read (i.e. no write) access */
4734 ret = check_device_access_params(scp, lba, a_num, false);
4738 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4740 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4742 return check_condition_result;
4744 /* Not changing store, so only need read access */
4745 sdeb_read_lock(sip);
4747 ret = do_dout_fetch(scp, a_num, arr);
4749 ret = DID_ERROR << 16;
4751 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4752 sdev_printk(KERN_INFO, scp->device,
4753 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4754 my_name, __func__, a_num * lb_size, ret);
4757 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4758 memcpy(arr + off, arr, lb_size);
4761 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4762 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4763 ret = check_condition_result;
4767 sdeb_read_unlock(sip);
4772 #define RZONES_DESC_HD 64
4774 /* Report zones depending on start LBA and reporting options */
4775 static int resp_report_zones(struct scsi_cmnd *scp,
4776 struct sdebug_dev_info *devip)
4778 unsigned int rep_max_zones, nrz = 0;
4780 u32 alloc_len, rep_opts, rep_len;
4783 u8 *arr = NULL, *desc;
4784 u8 *cmd = scp->cmnd;
4785 struct sdeb_zone_state *zsp = NULL;
4786 struct sdeb_store_info *sip = devip2sip(devip, false);
4788 if (!sdebug_dev_is_zoned(devip)) {
4789 mk_sense_invalid_opcode(scp);
4790 return check_condition_result;
4792 zs_lba = get_unaligned_be64(cmd + 2);
4793 alloc_len = get_unaligned_be32(cmd + 10);
4795 return 0; /* not an error */
4796 rep_opts = cmd[14] & 0x3f;
4797 partial = cmd[14] & 0x80;
4799 if (zs_lba >= sdebug_capacity) {
4800 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4801 return check_condition_result;
4804 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4806 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4808 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4810 return check_condition_result;
4813 sdeb_read_lock(sip);
4816 for (lba = zs_lba; lba < sdebug_capacity;
4817 lba = zsp->z_start + zsp->z_size) {
4818 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4820 zsp = zbc_zone(devip, lba);
4827 if (zsp->z_cond != ZC1_EMPTY)
4831 /* Implicit open zones */
4832 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4836 /* Explicit open zones */
4837 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4842 if (zsp->z_cond != ZC4_CLOSED)
4847 if (zsp->z_cond != ZC5_FULL)
4854 * Read-only, offline and reset-WP-recommended zones
4855 * are not emulated: no zones to report;
4859 /* non-seq-resource set */
4860 if (!zsp->z_non_seq_resource)
4864 /* All zones except gap zones. */
4865 if (zbc_zone_is_gap(zsp))
4869 /* Not write pointer (conventional) zones */
4870 if (zbc_zone_is_seq(zsp))
4874 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4875 INVALID_FIELD_IN_CDB, 0);
4876 ret = check_condition_result;
4880 if (nrz < rep_max_zones) {
4881 /* Fill zone descriptor */
4882 desc[0] = zsp->z_type;
4883 desc[1] = zsp->z_cond << 4;
4884 if (zsp->z_non_seq_resource)
4886 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4887 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4888 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4892 if (partial && nrz >= rep_max_zones)
4899 /* Zone list length. */
4900 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4902 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4903 /* Zone starting LBA granularity. */
4904 if (devip->zcap < devip->zsize)
4905 put_unaligned_be64(devip->zsize, arr + 16);
4907 rep_len = (unsigned long)desc - (unsigned long)arr;
4908 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4911 sdeb_read_unlock(sip);
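/*
 * Layout of each 64 byte (RZONES_DESC_HD) zone descriptor filled above:
 * byte 0 is the zone type, byte 1 carries the zone condition in its
 * high nibble plus a non-sequential resource flag, and bytes 8-15,
 * 16-23 and 24-31 hold the zone length, zone start LBA and write
 * pointer respectively, all big endian; the remaining bytes stay zero.
 */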
4916 /* Logic transplanted from tcmu-runner, file_zbc.c */
4917 static void zbc_open_all(struct sdebug_dev_info *devip)
4919 struct sdeb_zone_state *zsp = &devip->zstate[0];
4922 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4923 if (zsp->z_cond == ZC4_CLOSED)
4924 zbc_open_zone(devip, &devip->zstate[i], true);
4928 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4932 enum sdebug_z_cond zc;
4933 u8 *cmd = scp->cmnd;
4934 struct sdeb_zone_state *zsp;
4935 bool all = cmd[14] & 0x01;
4936 struct sdeb_store_info *sip = devip2sip(devip, false);
4938 if (!sdebug_dev_is_zoned(devip)) {
4939 mk_sense_invalid_opcode(scp);
4940 return check_condition_result;
4943 sdeb_write_lock(sip);
4946 /* Check if all closed zones can be opened */
4947 if (devip->max_open &&
4948 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4949 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4951 res = check_condition_result;
4954 /* Open all closed zones */
4955 zbc_open_all(devip);
4959 /* Open the specified zone */
4960 z_id = get_unaligned_be64(cmd + 2);
4961 if (z_id >= sdebug_capacity) {
4962 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4963 res = check_condition_result;
4967 zsp = zbc_zone(devip, z_id);
4968 if (z_id != zsp->z_start) {
4969 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4970 res = check_condition_result;
4973 if (zbc_zone_is_conv(zsp)) {
4974 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4975 res = check_condition_result;
4980 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4983 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4984 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4986 res = check_condition_result;
4990 zbc_open_zone(devip, zsp, true);
4992 sdeb_write_unlock(sip);
4996 static void zbc_close_all(struct sdebug_dev_info *devip)
5000 for (i = 0; i < devip->nr_zones; i++)
5001 zbc_close_zone(devip, &devip->zstate[i]);
5004 static int resp_close_zone(struct scsi_cmnd *scp,
5005 struct sdebug_dev_info *devip)
5009 u8 *cmd = scp->cmnd;
5010 struct sdeb_zone_state *zsp;
5011 bool all = cmd[14] & 0x01;
5012 struct sdeb_store_info *sip = devip2sip(devip, false);
5014 if (!sdebug_dev_is_zoned(devip)) {
5015 mk_sense_invalid_opcode(scp);
5016 return check_condition_result;
5019 sdeb_write_lock(sip);
5022 zbc_close_all(devip);
5026 /* Close specified zone */
5027 z_id = get_unaligned_be64(cmd + 2);
5028 if (z_id >= sdebug_capacity) {
5029 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5030 res = check_condition_result;
5034 zsp = zbc_zone(devip, z_id);
5035 if (z_id != zsp->z_start) {
5036 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5037 res = check_condition_result;
5040 if (zbc_zone_is_conv(zsp)) {
5041 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5042 res = check_condition_result;
5046 zbc_close_zone(devip, zsp);
5048 sdeb_write_unlock(sip);
5052 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5053 struct sdeb_zone_state *zsp, bool empty)
5055 enum sdebug_z_cond zc = zsp->z_cond;
5057 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5058 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5059 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5060 zbc_close_zone(devip, zsp);
5061 if (zsp->z_cond == ZC4_CLOSED)
5063 zsp->z_wp = zsp->z_start + zsp->z_size;
5064 zsp->z_cond = ZC5_FULL;
5068 static void zbc_finish_all(struct sdebug_dev_info *devip)
5072 for (i = 0; i < devip->nr_zones; i++)
5073 zbc_finish_zone(devip, &devip->zstate[i], false);
5076 static int resp_finish_zone(struct scsi_cmnd *scp,
5077 struct sdebug_dev_info *devip)
5079 struct sdeb_zone_state *zsp;
5082 u8 *cmd = scp->cmnd;
5083 bool all = cmd[14] & 0x01;
5084 struct sdeb_store_info *sip = devip2sip(devip, false);
5086 if (!sdebug_dev_is_zoned(devip)) {
5087 mk_sense_invalid_opcode(scp);
5088 return check_condition_result;
5091 sdeb_write_lock(sip);
5094 zbc_finish_all(devip);
5098 /* Finish the specified zone */
5099 z_id = get_unaligned_be64(cmd + 2);
5100 if (z_id >= sdebug_capacity) {
5101 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5102 res = check_condition_result;
5106 zsp = zbc_zone(devip, z_id);
5107 if (z_id != zsp->z_start) {
5108 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5109 res = check_condition_result;
5112 if (zbc_zone_is_conv(zsp)) {
5113 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5114 res = check_condition_result;
5118 zbc_finish_zone(devip, zsp, true);
5120 sdeb_write_unlock(sip);
5124 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5125 struct sdeb_zone_state *zsp)
5127 enum sdebug_z_cond zc;
5128 struct sdeb_store_info *sip = devip2sip(devip, false);
5130 if (!zbc_zone_is_seq(zsp))
5134 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5135 zbc_close_zone(devip, zsp);
5137 if (zsp->z_cond == ZC4_CLOSED)
5140 if (zsp->z_wp > zsp->z_start)
5141 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5142 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5144 zsp->z_non_seq_resource = false;
5145 zsp->z_wp = zsp->z_start;
5146 zsp->z_cond = ZC1_EMPTY;
5149 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5153 for (i = 0; i < devip->nr_zones; i++)
5154 zbc_rwp_zone(devip, &devip->zstate[i]);
5157 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5159 struct sdeb_zone_state *zsp;
5162 u8 *cmd = scp->cmnd;
5163 bool all = cmd[14] & 0x01;
5164 struct sdeb_store_info *sip = devip2sip(devip, false);
5166 if (!sdebug_dev_is_zoned(devip)) {
5167 mk_sense_invalid_opcode(scp);
5168 return check_condition_result;
5171 sdeb_write_lock(sip);
5178 z_id = get_unaligned_be64(cmd + 2);
5179 if (z_id >= sdebug_capacity) {
5180 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5181 res = check_condition_result;
5185 zsp = zbc_zone(devip, z_id);
5186 if (z_id != zsp->z_start) {
5187 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5188 res = check_condition_result;
5191 if (zbc_zone_is_conv(zsp)) {
5192 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5193 res = check_condition_result;
5197 zbc_rwp_zone(devip, zsp);
5199 sdeb_write_unlock(sip);
5203 static u32 get_tag(struct scsi_cmnd *cmnd)
5205 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5208 /* Queued (deferred) command completions converge here. */
5209 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5211 struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5212 unsigned long flags;
5213 struct scsi_cmnd *scp = sqcp->scmd;
5214 struct sdebug_scsi_cmd *sdsc;
5217 if (sdebug_statistics) {
5218 atomic_inc(&sdebug_completions);
5219 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5220 atomic_inc(&sdebug_miss_cpus);
5224 pr_err("scmd=NULL\n");
5228 sdsc = scsi_cmd_priv(scp);
5229 spin_lock_irqsave(&sdsc->lock, flags);
5230 aborted = sd_dp->aborted;
5231 if (unlikely(aborted))
5232 sd_dp->aborted = false;
5233 ASSIGN_QUEUED_CMD(scp, NULL);
5235 spin_unlock_irqrestore(&sdsc->lock, flags);
5238 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5239 blk_abort_request(scsi_cmd_to_rq(scp));
5243 scsi_done(scp); /* callback to mid level */
5245 sdebug_free_queued_cmd(sqcp);
5248 /* Called when the high-resolution timer fires. */
5249 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5251 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5253 sdebug_q_cmd_complete(sd_dp);
5254 return HRTIMER_NORESTART;
5257 /* Called when the work queue runs the scheduled work. */
5258 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5260 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5262 sdebug_q_cmd_complete(sd_dp);
5265 static bool got_shared_uuid;
5266 static uuid_t shared_uuid;
5268 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5270 struct sdeb_zone_state *zsp;
5271 sector_t capacity = get_sdebug_capacity();
5272 sector_t conv_capacity;
5273 sector_t zstart = 0;
5277 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5278 * a zone size allowing for at least 4 zones on the device. Otherwise,
5279 * use the specified zone size, checking that at least 2 zones can be
5280 * created for the device.
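* Worked example (default 128 MiB zone size and 512 byte sectors
* assumed): zsize = (128 << 20) >> 9 = 262144 sectors, so a 1 GiB
* (2097152 sector) device yields 8 zones, satisfying the 4 zone
* minimum, with zsize_shift = ilog2(262144) = 18.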
5282 if (!sdeb_zbc_zone_size_mb) {
5283 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5284 >> ilog2(sdebug_sector_size);
5285 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5287 if (devip->zsize < 2) {
5288 pr_err("Device capacity too small\n");
5292 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5293 pr_err("Zone size is not a power of 2\n");
5296 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5297 >> ilog2(sdebug_sector_size);
5298 if (devip->zsize >= capacity) {
5299 pr_err("Zone size too large for device capacity\n");
5304 devip->zsize_shift = ilog2(devip->zsize);
5305 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5307 if (sdeb_zbc_zone_cap_mb == 0) {
5308 devip->zcap = devip->zsize;
5310 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5311 ilog2(sdebug_sector_size);
5312 if (devip->zcap > devip->zsize) {
5313 pr_err("Zone capacity too large\n");
5318 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5319 if (conv_capacity >= capacity) {
5320 pr_err("Number of conventional zones too large\n");
5323 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5324 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5326 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5328 /* Add gap zones if zone capacity is smaller than the zone size */
5329 if (devip->zcap < devip->zsize)
5330 devip->nr_zones += devip->nr_seq_zones;
5332 if (devip->zmodel == BLK_ZONED_HM) {
5333 /* zbc_max_open_zones can be 0, meaning "not reported" */
5334 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5335 devip->max_open = (devip->nr_zones - 1) / 2;
5337 devip->max_open = sdeb_zbc_max_open;
5340 devip->zstate = kcalloc(devip->nr_zones,
5341 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5345 for (i = 0; i < devip->nr_zones; i++) {
5346 zsp = &devip->zstate[i];
5348 zsp->z_start = zstart;
5350 if (i < devip->nr_conv_zones) {
5351 zsp->z_type = ZBC_ZTYPE_CNV;
5352 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5353 zsp->z_wp = (sector_t)-1;
5355 min_t(u64, devip->zsize, capacity - zstart);
5356 } else if ((zstart & (devip->zsize - 1)) == 0) {
5357 if (devip->zmodel == BLK_ZONED_HM)
5358 zsp->z_type = ZBC_ZTYPE_SWR;
5360 zsp->z_type = ZBC_ZTYPE_SWP;
5361 zsp->z_cond = ZC1_EMPTY;
5362 zsp->z_wp = zsp->z_start;
5364 min_t(u64, devip->zcap, capacity - zstart);
5366 zsp->z_type = ZBC_ZTYPE_GAP;
5367 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5368 zsp->z_wp = (sector_t)-1;
5369 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5373 WARN_ON_ONCE((int)zsp->z_size <= 0);
5374 zstart += zsp->z_size;
5380 static struct sdebug_dev_info *sdebug_device_create(
5381 struct sdebug_host_info *sdbg_host, gfp_t flags)
5383 struct sdebug_dev_info *devip;
5385 devip = kzalloc(sizeof(*devip), flags);
5387 if (sdebug_uuid_ctl == 1)
5388 uuid_gen(&devip->lu_name);
5389 else if (sdebug_uuid_ctl == 2) {
5390 if (got_shared_uuid)
5391 devip->lu_name = shared_uuid;
5393 uuid_gen(&shared_uuid);
5394 got_shared_uuid = true;
5395 devip->lu_name = shared_uuid;
5398 devip->sdbg_host = sdbg_host;
5399 if (sdeb_zbc_in_use) {
5400 devip->zmodel = sdeb_zbc_model;
5401 if (sdebug_device_create_zones(devip)) {
5406 devip->zmodel = BLK_ZONED_NONE;
5408 devip->create_ts = ktime_get_boottime();
5409 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5410 spin_lock_init(&devip->list_lock);
5411 INIT_LIST_HEAD(&devip->inject_err_list);
5412 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5417 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5419 struct sdebug_host_info *sdbg_host;
5420 struct sdebug_dev_info *open_devip = NULL;
5421 struct sdebug_dev_info *devip;
5423 sdbg_host = shost_to_sdebug_host(sdev->host);
5425 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5426 if ((devip->used) && (devip->channel == sdev->channel) &&
5427 (devip->target == sdev->id) &&
5428 (devip->lun == sdev->lun))
5431 if ((!devip->used) && (!open_devip))
5435 if (!open_devip) { /* try and make a new one */
5436 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5438 pr_err("out of memory at line %d\n", __LINE__);
5443 open_devip->channel = sdev->channel;
5444 open_devip->target = sdev->id;
5445 open_devip->lun = sdev->lun;
5446 open_devip->sdbg_host = sdbg_host;
5447 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5448 open_devip->used = true;
5452 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5455 pr_info("slave_alloc <%u %u %u %llu>\n",
5456 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5461 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5463 struct sdebug_dev_info *devip =
5464 (struct sdebug_dev_info *)sdp->hostdata;
5465 struct dentry *dentry;
5468 pr_info("slave_configure <%u %u %u %llu>\n",
5469 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5470 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5471 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5472 if (devip == NULL) {
5473 devip = find_build_dev_info(sdp);
5475 return 1; /* no resources, will be marked offline */
5477 sdp->hostdata = devip;
5479 sdp->no_uld_attach = 1;
5480 config_cdb_len(sdp);
5482 if (sdebug_allow_restart)
5483 sdp->allow_restart = 1;
5485 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5486 sdebug_debugfs_root);
5487 if (IS_ERR_OR_NULL(devip->debugfs_entry))
5488 pr_info("%s: failed to create debugfs directory for device %s\n",
5489 __func__, dev_name(&sdp->sdev_gendev));
5491 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5492 &sdebug_error_fops);
5493 if (IS_ERR_OR_NULL(dentry))
5494 pr_info("%s: failed to create error file for device %s\n",
5495 __func__, dev_name(&sdp->sdev_gendev));
5500 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5502 struct sdebug_dev_info *devip =
5503 (struct sdebug_dev_info *)sdp->hostdata;
5504 struct sdebug_err_inject *err;
5507 pr_info("slave_destroy <%u %u %u %llu>\n",
5508 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5513 spin_lock(&devip->list_lock);
5514 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5515 list_del_rcu(&err->list);
5516 call_rcu(&err->rcu, sdebug_err_free);
5518 spin_unlock(&devip->list_lock);
5520 debugfs_remove(devip->debugfs_entry);
5522 /* make this slot available for re-use */
5523 devip->used = false;
5524 sdp->hostdata = NULL;
5527 /* Returns true if we require the queued memory to be freed by the caller. */
5528 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5529 enum sdeb_defer_type defer_t)
5531 if (defer_t == SDEB_DEFER_HRT) {
5532 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5535 case 0: /* Not active, it must have already run */
5536 case -1: /* It is executing the callback */
5538 case 1: /* Was active, we've now cancelled */
5542 } else if (defer_t == SDEB_DEFER_WQ) {
5543 /* Cancel if pending */
5544 if (cancel_work_sync(&sd_dp->ew.work))
5546 /* Was not pending, so it must have run */
5548 } else if (defer_t == SDEB_DEFER_POLL) {
5556 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5558 enum sdeb_defer_type l_defer_t;
5559 struct sdebug_defer *sd_dp;
5560 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5561 struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5563 lockdep_assert_held(&sdsc->lock);
5567 sd_dp = &sqcp->sd_dp;
5568 l_defer_t = READ_ONCE(sd_dp->defer_t);
5569 ASSIGN_QUEUED_CMD(cmnd, NULL);
5571 if (stop_qc_helper(sd_dp, l_defer_t))
5572 sdebug_free_queued_cmd(sqcp);
5578 * Called from scsi_debug_abort() only, which is for timed-out cmd.
5580 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5582 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5583 unsigned long flags;
5586 spin_lock_irqsave(&sdsc->lock, flags);
5587 res = scsi_debug_stop_cmnd(cmnd);
5588 spin_unlock_irqrestore(&sdsc->lock, flags);
5594 * All we can do is set the cmnd as internally aborted and wait for it to
5595 * finish. We cannot call scsi_done() as the normal completion path may do that.
5597 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5599 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5604 /* Deletes (stops) timers or work queues of all queued commands */
5605 static void stop_all_queued(void)
5607 struct sdebug_host_info *sdhp;
5609 mutex_lock(&sdebug_host_list_mutex);
5610 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5611 struct Scsi_Host *shost = sdhp->shost;
5613 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5615 mutex_unlock(&sdebug_host_list_mutex);
5618 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5620 struct scsi_device *sdp = cmnd->device;
5621 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5622 struct sdebug_err_inject *err;
5623 unsigned char *cmd = cmnd->cmnd;
5630 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5631 if (err->type == ERR_ABORT_CMD_FAILED &&
5632 (err->cmd == cmd[0] || err->cmd == 0xff)) {
5646 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5648 bool ok = scsi_debug_abort_cmnd(SCpnt);
5649 u8 *cmd = SCpnt->cmnd;
5654 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5655 sdev_printk(KERN_INFO, SCpnt->device,
5656 "%s: command%s found\n", __func__,
5659 if (sdebug_fail_abort(SCpnt)) {
5660 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5668 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5670 struct scsi_device *sdp = data;
5671 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5673 if (scmd->device == sdp)
5674 scsi_debug_abort_cmnd(scmd);
5679 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5680 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5682 struct Scsi_Host *shost = sdp->host;
5684 blk_mq_tagset_busy_iter(&shost->tag_set,
5685 scsi_debug_stop_all_queued_iter, sdp);
5688 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5690 struct scsi_device *sdp = cmnd->device;
5691 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5692 struct sdebug_err_inject *err;
5693 unsigned char *cmd = cmnd->cmnd;
5700 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5701 if (err->type == ERR_LUN_RESET_FAILED &&
5702 (err->cmd == cmd[0] || err->cmd == 0xff)) {
5716 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5718 struct scsi_device *sdp = SCpnt->device;
5719 struct sdebug_dev_info *devip = sdp->hostdata;
5720 u8 *cmd = SCpnt->cmnd;
5725 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5726 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5728 scsi_debug_stop_all_queued(sdp);
5730 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5732 if (sdebug_fail_lun_reset(SCpnt)) {
5733 scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5740 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5742 struct scsi_target *starget = scsi_target(cmnd->device);
5743 struct sdebug_target_info *targetip =
5744 (struct sdebug_target_info *)starget->hostdata;
5747 return targetip->reset_fail;
5752 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5754 struct scsi_device *sdp = SCpnt->device;
5755 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5756 struct sdebug_dev_info *devip;
5757 u8 *cmd = SCpnt->cmnd;
5761 ++num_target_resets;
5762 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5763 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5765 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5766 if (devip->target == sdp->id) {
5767 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5772 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5773 sdev_printk(KERN_INFO, sdp,
5774 "%s: %d device(s) found in target\n", __func__, k);
5776 if (sdebug_fail_target_reset(SCpnt)) {
5777 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5785 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5787 struct scsi_device *sdp = SCpnt->device;
5788 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5789 struct sdebug_dev_info *devip;
5794 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5795 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5797 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5798 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5802 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5803 sdev_printk(KERN_INFO, sdp,
5804 "%s: %d device(s) found in host\n", __func__, k);
5808 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5810 struct sdebug_host_info *sdbg_host;
5811 struct sdebug_dev_info *devip;
5815 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5816 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5817 mutex_lock(&sdebug_host_list_mutex);
5818 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5819 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5821 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5825 mutex_unlock(&sdebug_host_list_mutex);
5827 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5828 sdev_printk(KERN_INFO, SCpnt->device,
5829 "%s: %d device(s) found\n", __func__, k);
5833 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5835 struct msdos_partition *pp;
5836 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5837 int sectors_per_part, num_sectors, k;
5838 int heads_by_sects, start_sec, end_sec;
5840 /* assume partition table already zeroed */
5841 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5843 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5844 sdebug_num_parts = SDEBUG_MAX_PARTS;
5845 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5847 num_sectors = (int)get_sdebug_capacity();
5848 sectors_per_part = (num_sectors - sdebug_sectors_per)
5850 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5851 starts[0] = sdebug_sectors_per;
5852 max_part_secs = sectors_per_part;
5853 for (k = 1; k < sdebug_num_parts; ++k) {
5854 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5856 if (starts[k] - starts[k - 1] < max_part_secs)
5857 max_part_secs = starts[k] - starts[k - 1];
5859 starts[sdebug_num_parts] = num_sectors;
5860 starts[sdebug_num_parts + 1] = 0;
5862 ramp[510] = 0x55; /* magic partition markings */
5864 pp = (struct msdos_partition *)(ramp + 0x1be);
5865 for (k = 0; starts[k + 1]; ++k, ++pp) {
5866 start_sec = starts[k];
5867 end_sec = starts[k] + max_part_secs - 1;
5870 pp->cyl = start_sec / heads_by_sects;
5871 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5872 / sdebug_sectors_per;
5873 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5875 pp->end_cyl = end_sec / heads_by_sects;
5876 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5877 / sdebug_sectors_per;
5878 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5880 pp->start_sect = cpu_to_le32(start_sec);
5881 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5882 pp->sys_ind = 0x83; /* plain Linux partition */
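/*
 * CHS arithmetic example for the loop above (geometry assumed purely
 * for illustration; sdebug_heads and sdebug_sectors_per are computed
 * elsewhere in this driver): with heads = 16 and sectors_per = 32,
 * heads_by_sects = 512, so start_sec = 1000 maps to cylinder
 * 1000 / 512 = 1, head (1000 - 512) / 32 = 15 and sector
 * (1000 % 32) + 1 = 9.
 */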
5886 static void block_unblock_all_queues(bool block)
5888 struct sdebug_host_info *sdhp;
5890 lockdep_assert_held(&sdebug_host_list_mutex);
5892 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5893 struct Scsi_Host *shost = sdhp->shost;
5896 scsi_block_requests(shost);
5898 scsi_unblock_requests(shost);
5902 /* Adjust (by rounding down) the sdebug_cmnd_count so that abs(every_nth)-1
5903 * commands will be processed normally before triggers occur.
5905 static void tweak_cmnd_count(void)
5909 modulo = abs(sdebug_every_nth);
5913 mutex_lock(&sdebug_host_list_mutex);
5914 block_unblock_all_queues(true);
5915 count = atomic_read(&sdebug_cmnd_count);
5916 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5917 block_unblock_all_queues(false);
5918 mutex_unlock(&sdebug_host_list_mutex);
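/*
 * Example: with every_nth = 100 (or -100), modulo is 100, so a running
 * sdebug_cmnd_count of 1234 is rounded down to 1200 and the next 99
 * commands are processed normally before a trigger can fire again.
 */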
5921 static void clear_queue_stats(void)
5923 atomic_set(&sdebug_cmnd_count, 0);
5924 atomic_set(&sdebug_completions, 0);
5925 atomic_set(&sdebug_miss_cpus, 0);
5926 atomic_set(&sdebug_a_tsf, 0);
5929 static bool inject_on_this_cmd(void)
5931 if (sdebug_every_nth == 0)
5933 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5936 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5939 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5942 kmem_cache_free(queued_cmd_cache, sqcp);
5945 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5947 struct sdebug_queued_cmd *sqcp;
5948 struct sdebug_defer *sd_dp;
5950 sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5954 sd_dp = &sqcp->sd_dp;
5956 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5957 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5958 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5965 /* Complete the processing of the thread that queued a SCSI command to this
5966 * driver. It either completes the command by calling scsi_done() or
5967 * schedules an hr timer or work queue item and then returns 0. Returns
5968 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5970 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5972 int (*pfp)(struct scsi_cmnd *,
5973 struct sdebug_dev_info *),
5974 int delta_jiff, int ndelay)
5976 struct request *rq = scsi_cmd_to_rq(cmnd);
5977 bool polled = rq->cmd_flags & REQ_POLLED;
5978 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5979 unsigned long flags;
5980 u64 ns_from_boot = 0;
5981 struct sdebug_queued_cmd *sqcp;
5982 struct scsi_device *sdp;
5983 struct sdebug_defer *sd_dp;
5985 if (unlikely(devip == NULL)) {
5986 if (scsi_result == 0)
5987 scsi_result = DID_NO_CONNECT << 16;
5988 goto respond_in_thread;
5992 if (delta_jiff == 0)
5993 goto respond_in_thread;
5996 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5997 (scsi_result == 0))) {
5998 int num_in_q = scsi_device_busy(sdp);
5999 int qdepth = cmnd->device->queue_depth;
6001 if ((num_in_q == qdepth) &&
6002 (atomic_inc_return(&sdebug_a_tsf) >=
6003 abs(sdebug_every_nth))) {
6004 atomic_set(&sdebug_a_tsf, 0);
6005 scsi_result = device_qfull_result;
6007 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6008 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6009 __func__, num_in_q);
6013 sqcp = sdebug_alloc_queued_cmd(cmnd);
6015 pr_err("%s no alloc\n", __func__);
6016 return SCSI_MLQUEUE_HOST_BUSY;
6018 sd_dp = &sqcp->sd_dp;
6021 ns_from_boot = ktime_get_boottime_ns();
6023 /* one of the resp_*() response functions is called here */
6024 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6025 if (cmnd->result & SDEG_RES_IMMED_MASK) {
6026 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6027 delta_jiff = ndelay = 0;
6029 if (cmnd->result == 0 && scsi_result != 0)
6030 cmnd->result = scsi_result;
6031 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6032 if (atomic_read(&sdeb_inject_pending)) {
6033 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6034 atomic_set(&sdeb_inject_pending, 0);
6035 cmnd->result = check_condition_result;
6039 if (unlikely(sdebug_verbose && cmnd->result))
6040 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6041 __func__, cmnd->result);
6043 if (delta_jiff > 0 || ndelay > 0) {
6046 if (delta_jiff > 0) {
6047 u64 ns = jiffies_to_nsecs(delta_jiff);
6049 if (sdebug_random && ns < U32_MAX) {
6050 ns = get_random_u32_below((u32)ns);
6051 } else if (sdebug_random) {
6052 ns >>= 12; /* scale to 4 usec precision */
6053 if (ns < U32_MAX) /* over 4 hours max */
6054 ns = get_random_u32_below((u32)ns);
6057 kt = ns_to_ktime(ns);
6058 } else { /* ndelay has a 4.2 second max */
6059 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6061 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6062 u64 d = ktime_get_boottime_ns() - ns_from_boot;
6064 if (kt <= d) { /* elapsed duration >= kt */
6065 /* call scsi_done() from this thread */
6066 sdebug_free_queued_cmd(sqcp);
6070 /* otherwise reduce kt by elapsed time */
6074 if (sdebug_statistics)
6075 sd_dp->issuing_cpu = raw_smp_processor_id();
6077 spin_lock_irqsave(&sdsc->lock, flags);
6078 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6079 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6080 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6081 spin_unlock_irqrestore(&sdsc->lock, flags);
6083 /* schedule the invocation of scsi_done() for a later time */
6084 spin_lock_irqsave(&sdsc->lock, flags);
6085 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6086 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6087 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6089 * The completion handler will try to grab sdsc->lock,
6090 * so there is no chance that the completion handler
6091 * will call scsi_done() until we release the lock
6092 * here (so ok to keep referencing sdsc).
6094 spin_unlock_irqrestore(&sdsc->lock, flags);
6096 } else { /* jdelay < 0, use work queue */
6097 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6098 atomic_read(&sdeb_inject_pending))) {
6099 sd_dp->aborted = true;
6100 atomic_set(&sdeb_inject_pending, 0);
6101 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6102 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6105 if (sdebug_statistics)
6106 sd_dp->issuing_cpu = raw_smp_processor_id();
6108 spin_lock_irqsave(&sdsc->lock, flags);
6109 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6110 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6111 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6112 spin_unlock_irqrestore(&sdsc->lock, flags);
6114 spin_lock_irqsave(&sdsc->lock, flags);
6115 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6116 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6117 schedule_work(&sd_dp->ew.work);
6118 spin_unlock_irqrestore(&sdsc->lock, flags);
6124 respond_in_thread: /* call back to mid-layer using invocation thread */
6125 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6126 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6127 if (cmnd->result == 0 && scsi_result != 0)
6128 cmnd->result = scsi_result;
6133 /* Note: The following macros create attribute files in the
6134 /sys/module/scsi_debug/parameters directory. Unfortunately this
6135 driver is not notified of such a change and cannot trigger auxiliary actions
6136 as it can when the corresponding attribute in the
6137 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
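Example with hypothetical values: "modprobe scsi_debug dev_size_mb=256
num_tgts=2 max_luns=4 delay=0" creates two targets with four LUNs each,
backed by a 256 MiB ram store; the writable parameters can later be
adjusted through the sysfs attributes mentioned above.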
6139 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6140 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6141 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6142 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6143 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6144 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6145 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6146 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6147 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6148 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6149 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6150 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6151 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6152 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6153 module_param_string(inq_product, sdebug_inq_product_id,
6154 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6155 module_param_string(inq_rev, sdebug_inq_product_rev,
6156 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6157 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6158 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6159 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6160 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6161 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6162 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6163 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6164 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6165 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6166 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6167 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6169 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6171 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6172 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6173 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6174 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6175 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6176 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6177 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6178 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6179 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6180 module_param_named(per_host_store, sdebug_per_host_store, bool,
6182 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6183 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6184 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6185 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6186 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6187 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6188 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6189 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6190 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6191 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6192 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6193 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6194 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6195 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6196 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6197 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6198 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6199 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6201 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6202 module_param_named(write_same_length, sdebug_write_same_length, int,
6204 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6205 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6206 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6207 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6208 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6209 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6211 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6212 MODULE_DESCRIPTION("SCSI debug adapter driver");
6213 MODULE_LICENSE("GPL");
6214 MODULE_VERSION(SDEBUG_VERSION);
6216 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6217 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6218 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6219 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6220 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6221 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6222 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6223 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6224 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6225 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6226 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6227 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6228 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6229 MODULE_PARM_DESC(host_max_queue,
6230 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6231 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6232 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6233 SDEBUG_VERSION "\")");
6234 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6235 MODULE_PARM_DESC(lbprz,
6236 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6237 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6238 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6239 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6240 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6241 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
6242 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6243 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6244 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow-on MEDIUM error");
6245 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6246 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6247 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6248 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6249 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6250 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6251 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6252 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6253 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6254 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6255 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6256 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6257 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
6258 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6259 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6260 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6261 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6262 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6263 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6264 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6265 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6266 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6267 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6268 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6269 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6270 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6271 MODULE_PARM_DESC(uuid_ctl,
6272 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6273 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6274 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6275 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6276 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6277 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6278 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6279 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6280 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6281 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6282 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
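/*
 * Example (illustrative) module load using the parameters described above:
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * builds one host (add_host default) with 2 targets of 4 LUNs each, all
 * sharing a 256 MiB ramdisk store.
 */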
6284 #define SDEBUG_INFO_LEN 256
6285 static char sdebug_info[SDEBUG_INFO_LEN];
6287 static const char *scsi_debug_info(struct Scsi_Host *shp)
6288 {
6289 int k;
6291 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6292 my_name, SDEBUG_VERSION, sdebug_version_date);
6293 if (k >= (SDEBUG_INFO_LEN - 1))
6294 return sdebug_info;
6295 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6296 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6297 sdebug_dev_size_mb, sdebug_opts, submit_queues,
6298 "statistics", (int)sdebug_statistics);
6299 return sdebug_info;
6300 }
6302 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
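/*
 * Example (illustrative, host_id 0 assumed):
 *     echo 1 > /proc/scsi/scsi_debug/0
 * sets opts=1 (SDEBUG_OPT_NOISE -> verbose logging); see
 * scsi_debug_write_info() below.
 */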
6303 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6308 int minLen = length > 15 ? 15 : length;
6310 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6312 memcpy(arr, buffer, minLen);
6314 if (1 != sscanf(arr, "%d", &opts))
6317 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6318 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6319 if (sdebug_every_nth != 0)
6324 struct sdebug_submit_queue_data {
6325 int *first;
6326 int *last;
6327 int queue_num;
6328 };
6330 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6332 struct sdebug_submit_queue_data *data = opaque;
6333 u32 unique_tag = blk_mq_unique_tag(rq);
6334 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6335 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6336 int queue_num = data->queue_num;
6338 if (hwq != queue_num)
6341 /* Rely on iter'ing in ascending tag order */
6342 if (*data->first == -1)
6343 *data->first = *data->last = tag;
6350 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6351 * same for each scsi_debug host (if more than one). Some of the counters
6352 * in the output are not atomic so they may be inaccurate on a busy system. */
6353 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6355 struct sdebug_host_info *sdhp;
6358 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6359 SDEBUG_VERSION, sdebug_version_date);
6360 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6361 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6362 sdebug_opts, sdebug_every_nth);
6363 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6364 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6365 sdebug_sector_size, "bytes");
6366 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6367 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6369 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6370 num_dev_resets, num_target_resets, num_bus_resets,
6372 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6373 dix_reads, dix_writes, dif_errors);
6374 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6376 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6377 atomic_read(&sdebug_cmnd_count),
6378 atomic_read(&sdebug_completions),
6379 "miss_cpus", atomic_read(&sdebug_miss_cpus),
6380 atomic_read(&sdebug_a_tsf),
6381 atomic_read(&sdeb_mq_poll_count));
6383 seq_printf(m, "submit_queues=%d\n", submit_queues);
6384 for (j = 0; j < submit_queues; ++j) {
6386 struct sdebug_submit_queue_data data = {
6391 seq_printf(m, " queue %d:\n", j);
6392 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6395 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
6396 "first,last bits", f, l);
6400 seq_printf(m, "this host_no=%d\n", host->host_no);
6401 if (!xa_empty(per_store_ap)) {
6404 unsigned long l_idx;
6405 struct sdeb_store_info *sip;
6407 seq_puts(m, "\nhost list:\n");
6409 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6411 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
6412 sdhp->shost->host_no, idx);
6415 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6416 sdeb_most_recent_idx);
6418 xa_for_each(per_store_ap, l_idx, sip) {
6419 niu = xa_get_mark(per_store_ap, l_idx,
6420 SDEB_XA_NOT_IN_USE);
6422 seq_printf(m, " %d: idx=%d%s\n", j, idx,
6423 (niu ? " not_in_use" : ""));
6430 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6432 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6434 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6435 * of delay is jiffies.
6436 */
6437 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6442 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6444 if (sdebug_jdelay != jdelay) {
6445 struct sdebug_host_info *sdhp;
6447 mutex_lock(&sdebug_host_list_mutex);
6448 block_unblock_all_queues(true);
6450 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6451 struct Scsi_Host *shost = sdhp->shost;
6453 if (scsi_host_busy(shost)) {
6454 res = -EBUSY; /* queued commands */
6459 sdebug_jdelay = jdelay;
6462 block_unblock_all_queues(false);
6463 mutex_unlock(&sdebug_host_list_mutex);
6469 static DRIVER_ATTR_RW(delay);
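/*
 * Example (illustrative): make responses immediate, then restore the
 * default one jiffy delay (either write fails with EBUSY while commands
 * are queued):
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/delay
 */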
6471 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6473 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6475 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6476 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6477 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6482 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6483 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6485 if (sdebug_ndelay != ndelay) {
6486 struct sdebug_host_info *sdhp;
6488 mutex_lock(&sdebug_host_list_mutex);
6489 block_unblock_all_queues(true);
6491 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6492 struct Scsi_Host *shost = sdhp->shost;
6494 if (scsi_host_busy(shost)) {
6495 res = -EBUSY; /* queued commands */
6501 sdebug_ndelay = ndelay;
6502 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6505 block_unblock_all_queues(false);
6506 mutex_unlock(&sdebug_host_list_mutex);
6512 static DRIVER_ATTR_RW(ndelay);
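/*
 * Example (illustrative): a 1 microsecond response delay; any accepted
 * value > 0 overrides delay (sdebug_jdelay becomes JDELAY_OVERRIDDEN):
 *     echo 1000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 */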
6514 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6516 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6519 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6525 if (sscanf(buf, "%10s", work) == 1) {
6526 if (strncasecmp(work, "0x", 2) == 0) {
6527 if (kstrtoint(work + 2, 16, &opts) == 0)
6530 if (kstrtoint(work, 10, &opts) == 0)
6537 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6538 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6542 static DRIVER_ATTR_RW(opts);
6544 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6546 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6548 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6553 /* Cannot change from or to TYPE_ZBC with sysfs */
6554 if (sdebug_ptype == TYPE_ZBC)
6557 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6565 static DRIVER_ATTR_RW(ptype);
6567 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6569 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6571 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6576 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6582 static DRIVER_ATTR_RW(dsense);
6584 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6586 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6588 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6593 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6594 bool want_store = (n == 0);
6595 struct sdebug_host_info *sdhp;
6598 sdebug_fake_rw = (sdebug_fake_rw > 0);
6599 if (sdebug_fake_rw == n)
6600 return count; /* not transitioning so do nothing */
6602 if (want_store) { /* 1 --> 0 transition, set up store */
6603 if (sdeb_first_idx < 0) {
6604 idx = sdebug_add_store();
6608 idx = sdeb_first_idx;
6609 xa_clear_mark(per_store_ap, idx,
6610 SDEB_XA_NOT_IN_USE);
6612 /* make all hosts use same store */
6613 list_for_each_entry(sdhp, &sdebug_host_list,
6615 if (sdhp->si_idx != idx) {
6616 xa_set_mark(per_store_ap, sdhp->si_idx,
6617 SDEB_XA_NOT_IN_USE);
6621 sdeb_most_recent_idx = idx;
6622 } else { /* 0 --> 1 transition is trigger for shrink */
6623 sdebug_erase_all_stores(true /* apart from first */);
6630 static DRIVER_ATTR_RW(fake_rw);
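/*
 * Example (illustrative) of the transitions handled in fake_rw_store():
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw  # 0 -> 1 shrinks stores
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw  # 1 -> 0 sets up a store
 */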
6632 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6634 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6636 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6641 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6642 sdebug_no_lun_0 = n;
6647 static DRIVER_ATTR_RW(no_lun_0);
6649 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6651 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6653 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6658 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6659 sdebug_num_tgts = n;
6660 sdebug_max_tgts_luns();
6665 static DRIVER_ATTR_RW(num_tgts);
6667 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6669 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6671 static DRIVER_ATTR_RO(dev_size_mb);
6673 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6675 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6678 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6683 if (kstrtobool(buf, &v))
6686 sdebug_per_host_store = v;
6689 static DRIVER_ATTR_RW(per_host_store);
6691 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6693 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6695 static DRIVER_ATTR_RO(num_parts);
6697 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6699 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6701 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6707 if (sscanf(buf, "%10s", work) == 1) {
6708 if (strncasecmp(work, "0x", 2) == 0) {
6709 if (kstrtoint(work + 2, 16, &nth) == 0)
6710 goto every_nth_done;
6712 if (kstrtoint(work, 10, &nth) == 0)
6713 goto every_nth_done;
6719 sdebug_every_nth = nth;
6720 if (nth && !sdebug_statistics) {
6721 pr_info("every_nth needs statistics=1, set it\n");
6722 sdebug_statistics = true;
6727 static DRIVER_ATTR_RW(every_nth);
6729 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6731 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6733 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6739 if (kstrtoint(buf, 0, &n))
6742 if (n > (int)SAM_LUN_AM_FLAT) {
6743 pr_warn("only LUN address methods 0 and 1 are supported\n");
6746 changed = ((int)sdebug_lun_am != n);
6748 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6749 struct sdebug_host_info *sdhp;
6750 struct sdebug_dev_info *dp;
6752 mutex_lock(&sdebug_host_list_mutex);
6753 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6754 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6755 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6758 mutex_unlock(&sdebug_host_list_mutex);
6764 static DRIVER_ATTR_RW(lun_format);
6766 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6768 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6770 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6776 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6778 pr_warn("max_luns can be no more than 256\n");
6781 changed = (sdebug_max_luns != n);
6782 sdebug_max_luns = n;
6783 sdebug_max_tgts_luns();
6784 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6785 struct sdebug_host_info *sdhp;
6786 struct sdebug_dev_info *dp;
6788 mutex_lock(&sdebug_host_list_mutex);
6789 list_for_each_entry(sdhp, &sdebug_host_list,
6791 list_for_each_entry(dp, &sdhp->dev_info_list,
6793 set_bit(SDEBUG_UA_LUNS_CHANGED,
6797 mutex_unlock(&sdebug_host_list_mutex);
6803 static DRIVER_ATTR_RW(max_luns);
6805 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6807 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6809 /* N.B. max_queue can be changed while there are queued commands. In flight
6810 * commands beyond the new max_queue will be completed. */
6811 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6816 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6817 (n <= SDEBUG_CANQUEUE) &&
6818 (sdebug_host_max_queue == 0)) {
6819 mutex_lock(&sdebug_host_list_mutex);
6821 /* We may only change sdebug_max_queue when we have no shosts */
6822 if (list_empty(&sdebug_host_list))
6823 sdebug_max_queue = n;
6826 mutex_unlock(&sdebug_host_list_mutex);
6831 static DRIVER_ATTR_RW(max_queue);
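/*
 * Example (illustrative): since sdebug_max_queue may only change while the
 * host list is empty (see max_queue_store() above), remove hosts first:
 *     echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *     echo 64 > /sys/bus/pseudo/drivers/scsi_debug/max_queue
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */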
6833 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6835 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6838 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6840 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6843 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6847 if (kstrtobool(buf, &v))
6850 sdebug_no_rwlock = v;
6853 static DRIVER_ATTR_RW(no_rwlock);
6855 /*
6856 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6857 * in range [0, sdebug_host_max_queue), we can't change it.
6858 */
6859 static DRIVER_ATTR_RO(host_max_queue);
6861 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6863 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6865 static DRIVER_ATTR_RO(no_uld);
6867 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6869 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6871 static DRIVER_ATTR_RO(scsi_level);
6873 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6875 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6877 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6883 /* Ignore capacity change for ZBC drives for now */
6884 if (sdeb_zbc_in_use)
6887 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6888 changed = (sdebug_virtual_gb != n);
6889 sdebug_virtual_gb = n;
6890 sdebug_capacity = get_sdebug_capacity();
6892 struct sdebug_host_info *sdhp;
6893 struct sdebug_dev_info *dp;
6895 mutex_lock(&sdebug_host_list_mutex);
6896 list_for_each_entry(sdhp, &sdebug_host_list,
6898 list_for_each_entry(dp, &sdhp->dev_info_list,
6900 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6904 mutex_unlock(&sdebug_host_list_mutex);
6910 static DRIVER_ATTR_RW(virtual_gb);
6912 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6914 /* absolute number of hosts currently active is what is shown */
6915 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6918 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6923 struct sdeb_store_info *sip;
6924 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6927 if (sscanf(buf, "%d", &delta_hosts) != 1)
6929 if (delta_hosts > 0) {
6933 xa_for_each_marked(per_store_ap, idx, sip,
6934 SDEB_XA_NOT_IN_USE) {
6935 sdeb_most_recent_idx = (int)idx;
6939 if (found) /* re-use case */
6940 sdebug_add_host_helper((int)idx);
6942 sdebug_do_add_host(true);
6944 sdebug_do_add_host(false);
6946 } while (--delta_hosts);
6947 } else if (delta_hosts < 0) {
6949 sdebug_do_remove_host(false);
6950 } while (++delta_hosts);
6954 static DRIVER_ATTR_RW(add_host);
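/*
 * Example (illustrative): with per_host_store set, each positive add_host
 * write gets a new store; negative values remove hosts (see add_host_store()
 * above):
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/per_host_store
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *     echo -2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */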
6956 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6958 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6960 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6965 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6966 sdebug_vpd_use_hostno = n;
6971 static DRIVER_ATTR_RW(vpd_use_hostno);
6973 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6975 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6977 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6982 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6984 sdebug_statistics = true;
6986 clear_queue_stats();
6987 sdebug_statistics = false;
6993 static DRIVER_ATTR_RW(statistics);
6995 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6997 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6999 static DRIVER_ATTR_RO(sector_size);
7001 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7003 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7005 static DRIVER_ATTR_RO(submit_queues);
7007 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7009 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7011 static DRIVER_ATTR_RO(dix);
7013 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7015 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7017 static DRIVER_ATTR_RO(dif);
7019 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7021 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7023 static DRIVER_ATTR_RO(guard);
7025 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7027 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7029 static DRIVER_ATTR_RO(ato);
7031 static ssize_t map_show(struct device_driver *ddp, char *buf)
7035 if (!scsi_debug_lbp())
7036 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7037 sdebug_store_sectors);
7039 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7040 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7043 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7044 (int)map_size, sip->map_storep);
7046 buf[count++] = '\n';
7051 static DRIVER_ATTR_RO(map);
7053 static ssize_t random_show(struct device_driver *ddp, char *buf)
7055 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7058 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7063 if (kstrtobool(buf, &v))
7069 static DRIVER_ATTR_RW(random);
7071 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7073 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7075 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7080 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7081 sdebug_removable = (n > 0);
7086 static DRIVER_ATTR_RW(removable);
7088 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7090 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7092 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7093 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7098 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7099 sdebug_host_lock = (n > 0);
7104 static DRIVER_ATTR_RW(host_lock);
7106 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7108 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7110 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7115 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7116 sdebug_strict = (n > 0);
7121 static DRIVER_ATTR_RW(strict);
7123 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7125 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7127 static DRIVER_ATTR_RO(uuid_ctl);
7129 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7131 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7133 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7138 ret = kstrtoint(buf, 0, &n);
7142 all_config_cdb_len();
7145 static DRIVER_ATTR_RW(cdb_len);
7147 static const char * const zbc_model_strs_a[] = {
7148 [BLK_ZONED_NONE] = "none",
7149 [BLK_ZONED_HA] = "host-aware",
7150 [BLK_ZONED_HM] = "host-managed",
7151 };
7153 static const char * const zbc_model_strs_b[] = {
7154 [BLK_ZONED_NONE] = "no",
7155 [BLK_ZONED_HA] = "aware",
7156 [BLK_ZONED_HM] = "managed",
7157 };
7159 static const char * const zbc_model_strs_c[] = {
7160 [BLK_ZONED_NONE] = "0",
7161 [BLK_ZONED_HA] = "1",
7162 [BLK_ZONED_HM] = "2",
7163 };
7165 static int sdeb_zbc_model_str(const char *cp)
7167 int res = sysfs_match_string(zbc_model_strs_a, cp);
7170 res = sysfs_match_string(zbc_model_strs_b, cp);
7172 res = sysfs_match_string(zbc_model_strs_c, cp);
7180 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7182 return scnprintf(buf, PAGE_SIZE, "%s\n",
7183 zbc_model_strs_a[sdeb_zbc_model]);
7185 static DRIVER_ATTR_RO(zbc);
7187 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7189 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7191 static DRIVER_ATTR_RO(tur_ms_to_ready);
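/*
 * Example (illustrative): report "becoming ready" to TEST UNIT READY for
 * the first 2 seconds after device creation; the attribute is read-only,
 * so it can only be given at load time:
 *     modprobe scsi_debug tur_ms_to_ready=2000
 */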
7193 /* Note: The following array creates attribute files in the
7194 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7195 files (over those found in the /sys/module/scsi_debug/parameters
7196 directory) is that auxiliary actions can be triggered when an attribute
7197 is changed. For example see: add_host_store() above.
7198 */
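/*
 * Example (illustrative) of such an auxiliary action: writing every_nth
 * through this directory runs every_nth_store() above, which also enables
 * statistics when needed; the equivalent write to
 * /sys/module/scsi_debug/parameters/every_nth cannot do that:
 *     echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */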
7200 static struct attribute *sdebug_drv_attrs[] = {
7201 &driver_attr_delay.attr,
7202 &driver_attr_opts.attr,
7203 &driver_attr_ptype.attr,
7204 &driver_attr_dsense.attr,
7205 &driver_attr_fake_rw.attr,
7206 &driver_attr_host_max_queue.attr,
7207 &driver_attr_no_lun_0.attr,
7208 &driver_attr_num_tgts.attr,
7209 &driver_attr_dev_size_mb.attr,
7210 &driver_attr_num_parts.attr,
7211 &driver_attr_every_nth.attr,
7212 &driver_attr_lun_format.attr,
7213 &driver_attr_max_luns.attr,
7214 &driver_attr_max_queue.attr,
7215 &driver_attr_no_rwlock.attr,
7216 &driver_attr_no_uld.attr,
7217 &driver_attr_scsi_level.attr,
7218 &driver_attr_virtual_gb.attr,
7219 &driver_attr_add_host.attr,
7220 &driver_attr_per_host_store.attr,
7221 &driver_attr_vpd_use_hostno.attr,
7222 &driver_attr_sector_size.attr,
7223 &driver_attr_statistics.attr,
7224 &driver_attr_submit_queues.attr,
7225 &driver_attr_dix.attr,
7226 &driver_attr_dif.attr,
7227 &driver_attr_guard.attr,
7228 &driver_attr_ato.attr,
7229 &driver_attr_map.attr,
7230 &driver_attr_random.attr,
7231 &driver_attr_removable.attr,
7232 &driver_attr_host_lock.attr,
7233 &driver_attr_ndelay.attr,
7234 &driver_attr_strict.attr,
7235 &driver_attr_uuid_ctl.attr,
7236 &driver_attr_cdb_len.attr,
7237 &driver_attr_tur_ms_to_ready.attr,
7238 &driver_attr_zbc.attr,
7239 NULL,
7240 };
7241 ATTRIBUTE_GROUPS(sdebug_drv);
7243 static struct device *pseudo_primary;
7245 static int __init scsi_debug_init(void)
7247 bool want_store = (sdebug_fake_rw == 0);
7249 int k, ret, hosts_to_add;
7252 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7253 pr_warn("ndelay must be less than 1 second, ignored\n");
7255 } else if (sdebug_ndelay > 0)
7256 sdebug_jdelay = JDELAY_OVERRIDDEN;
7258 switch (sdebug_sector_size) {
7265 pr_err("invalid sector_size %d\n", sdebug_sector_size);
7269 switch (sdebug_dif) {
7270 case T10_PI_TYPE0_PROTECTION:
7272 case T10_PI_TYPE1_PROTECTION:
7273 case T10_PI_TYPE2_PROTECTION:
7274 case T10_PI_TYPE3_PROTECTION:
7275 have_dif_prot = true;
7279 pr_err("dif must be 0, 1, 2 or 3\n");
7283 if (sdebug_num_tgts < 0) {
7284 pr_err("num_tgts must be >= 0\n");
7288 if (sdebug_guard > 1) {
7289 pr_err("guard must be 0 or 1\n");
7293 if (sdebug_ato > 1) {
7294 pr_err("ato must be 0 or 1\n");
7298 if (sdebug_physblk_exp > 15) {
7299 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7303 sdebug_lun_am = sdebug_lun_am_i;
7304 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7305 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7306 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7309 if (sdebug_max_luns > 256) {
7310 if (sdebug_max_luns > 16384) {
7311 pr_warn("max_luns can be no more than 16384, use default\n");
7312 sdebug_max_luns = DEF_MAX_LUNS;
7314 sdebug_lun_am = SAM_LUN_AM_FLAT;
7317 if (sdebug_lowest_aligned > 0x3fff) {
7318 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7322 if (submit_queues < 1) {
7323 pr_err("submit_queues must be 1 or more\n");
7327 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7328 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7332 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7333 (sdebug_host_max_queue < 0)) {
7334 pr_err("host_max_queue must be in range [0 %d]\n",
7339 if (sdebug_host_max_queue &&
7340 (sdebug_max_queue != sdebug_host_max_queue)) {
7341 sdebug_max_queue = sdebug_host_max_queue;
7342 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7346 /*
7347 * check for host managed zoned block device specified with
7348 * ptype=0x14 or zbc=XXX.
7349 */
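/*
 * Example (illustrative): a host-managed ZBC disk with 128 MiB zones and
 * no conventional zones:
 *     modprobe scsi_debug zbc=managed zone_size_mb=128 zone_nr_conv=0
 */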
7350 if (sdebug_ptype == TYPE_ZBC) {
7351 sdeb_zbc_model = BLK_ZONED_HM;
7352 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7353 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7357 switch (sdeb_zbc_model) {
7358 case BLK_ZONED_NONE:
7360 sdebug_ptype = TYPE_DISK;
7363 sdebug_ptype = TYPE_ZBC;
7366 pr_err("Invalid ZBC model\n");
7370 if (sdeb_zbc_model != BLK_ZONED_NONE) {
7371 sdeb_zbc_in_use = true;
7372 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7373 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7376 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7377 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7378 if (sdebug_dev_size_mb < 1)
7379 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
7380 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7381 sdebug_store_sectors = sz / sdebug_sector_size;
7382 sdebug_capacity = get_sdebug_capacity();
7384 /* play around with geometry, don't waste too much on track 0 */
7386 sdebug_sectors_per = 32;
7387 if (sdebug_dev_size_mb >= 256)
7389 else if (sdebug_dev_size_mb >= 16)
7391 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7392 (sdebug_sectors_per * sdebug_heads);
7393 if (sdebug_cylinders_per >= 1024) {
7394 /* other LLDs do this; implies >= 1GB ram disk ... */
7396 sdebug_sectors_per = 63;
7397 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7398 (sdebug_sectors_per * sdebug_heads);
7400 if (scsi_debug_lbp()) {
7401 sdebug_unmap_max_blocks =
7402 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7404 sdebug_unmap_max_desc =
7405 clamp(sdebug_unmap_max_desc, 0U, 256U);
7407 sdebug_unmap_granularity =
7408 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7410 if (sdebug_unmap_alignment &&
7411 sdebug_unmap_granularity <=
7412 sdebug_unmap_alignment) {
7413 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7417 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7419 idx = sdebug_add_store();
7424 pseudo_primary = root_device_register("pseudo_0");
7425 if (IS_ERR(pseudo_primary)) {
7426 pr_warn("root_device_register() error\n");
7427 ret = PTR_ERR(pseudo_primary);
7430 ret = bus_register(&pseudo_lld_bus);
7432 pr_warn("bus_register error: %d\n", ret);
7435 ret = driver_register(&sdebug_driverfs_driver);
7437 pr_warn("driver_register error: %d\n", ret);
7441 hosts_to_add = sdebug_add_host;
7442 sdebug_add_host = 0;
7444 queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7445 if (!queued_cmd_cache) {
7450 sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7451 if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7452 pr_info("%s: failed to create initial debugfs directory\n", __func__);
7454 for (k = 0; k < hosts_to_add; k++) {
7455 if (want_store && k == 0) {
7456 ret = sdebug_add_host_helper(idx);
7458 pr_err("add_host_helper k=%d, error=%d\n",
7463 ret = sdebug_do_add_host(want_store &&
7464 sdebug_per_host_store);
7466 pr_err("add_host k=%d error=%d\n", k, -ret);
7472 pr_info("built %d host(s)\n", sdebug_num_hosts);
7477 driver_unregister(&sdebug_driverfs_driver);
7479 bus_unregister(&pseudo_lld_bus);
7481 root_device_unregister(pseudo_primary);
7483 sdebug_erase_store(idx, NULL);
7487 static void __exit scsi_debug_exit(void)
7489 int k = sdebug_num_hosts;
7492 sdebug_do_remove_host(true);
7493 kmem_cache_destroy(queued_cmd_cache);
7494 driver_unregister(&sdebug_driverfs_driver);
7495 bus_unregister(&pseudo_lld_bus);
7496 root_device_unregister(pseudo_primary);
7498 sdebug_erase_all_stores(false);
7499 xa_destroy(per_store_ap);
7500 debugfs_remove(sdebug_debugfs_root);
7503 device_initcall(scsi_debug_init);
7504 module_exit(scsi_debug_exit);
7506 static void sdebug_release_adapter(struct device *dev)
7508 struct sdebug_host_info *sdbg_host;
7510 sdbg_host = dev_to_sdebug_host(dev);
7514 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7515 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7520 if (xa_empty(per_store_ap))
7522 sip = xa_load(per_store_ap, idx);
7526 vfree(sip->map_storep);
7527 vfree(sip->dif_storep);
7529 xa_erase(per_store_ap, idx);
7533 /* Assume apart_from_first==false only in shutdown case. */
7534 static void sdebug_erase_all_stores(bool apart_from_first)
7537 struct sdeb_store_info *sip = NULL;
7539 xa_for_each(per_store_ap, idx, sip) {
7540 if (apart_from_first)
7541 apart_from_first = false;
7542 else
7543 sdebug_erase_store(idx, sip);
7544 }
7545 if (apart_from_first)
7546 sdeb_most_recent_idx = sdeb_first_idx;
7549 /*
7550 * Returns store xarray new element index (idx) if >=0 else negated errno.
7551 * Limit the number of stores to 65536.
7552 */
7553 static int sdebug_add_store(void)
7557 unsigned long iflags;
7558 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7559 struct sdeb_store_info *sip = NULL;
7560 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7562 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7566 xa_lock_irqsave(per_store_ap, iflags);
7567 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7568 if (unlikely(res < 0)) {
7569 xa_unlock_irqrestore(per_store_ap, iflags);
7571 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7574 sdeb_most_recent_idx = n_idx;
7575 if (sdeb_first_idx < 0)
7576 sdeb_first_idx = n_idx;
7577 xa_unlock_irqrestore(per_store_ap, iflags);
7580 sip->storep = vzalloc(sz);
7582 pr_err("user data oom\n");
7585 if (sdebug_num_parts > 0)
7586 sdebug_build_parts(sip->storep, sz);
7588 /* DIF/DIX: what T10 calls Protection Information (PI) */
7592 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7593 sip->dif_storep = vmalloc(dif_size);
7595 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7598 if (!sip->dif_storep) {
7599 pr_err("DIX oom\n");
7602 memset(sip->dif_storep, 0xff, dif_size);
7604 /* Logical Block Provisioning */
7605 if (scsi_debug_lbp()) {
7606 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7607 sip->map_storep = vmalloc(array_size(sizeof(long),
7608 BITS_TO_LONGS(map_size)));
7610 pr_info("%lu provisioning blocks\n", map_size);
7612 if (!sip->map_storep) {
7613 pr_err("LBP map oom\n");
7617 bitmap_zero(sip->map_storep, map_size);
7619 /* Map first 1KB for partition table */
7620 if (sdebug_num_parts)
7621 map_region(sip, 0, 2);
7624 rwlock_init(&sip->macc_lck);
7627 sdebug_erase_store((int)n_idx, sip);
7628 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7632 static int sdebug_add_host_helper(int per_host_idx)
7634 int k, devs_per_host, idx;
7635 int error = -ENOMEM;
7636 struct sdebug_host_info *sdbg_host;
7637 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7639 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7642 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7643 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7644 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7645 sdbg_host->si_idx = idx;
7647 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7649 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7650 for (k = 0; k < devs_per_host; k++) {
7651 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7656 mutex_lock(&sdebug_host_list_mutex);
7657 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7658 mutex_unlock(&sdebug_host_list_mutex);
7660 sdbg_host->dev.bus = &pseudo_lld_bus;
7661 sdbg_host->dev.parent = pseudo_primary;
7662 sdbg_host->dev.release = &sdebug_release_adapter;
7663 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7665 error = device_register(&sdbg_host->dev);
7667 mutex_lock(&sdebug_host_list_mutex);
7668 list_del(&sdbg_host->host_list);
7669 mutex_unlock(&sdebug_host_list_mutex);
7677 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7679 list_del(&sdbg_devinfo->dev_list);
7680 kfree(sdbg_devinfo->zstate);
7681 kfree(sdbg_devinfo);
7683 if (sdbg_host->dev.release)
7684 put_device(&sdbg_host->dev);
7687 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7691 static int sdebug_do_add_host(bool mk_new_store)
7693 int ph_idx = sdeb_most_recent_idx;
7696 ph_idx = sdebug_add_store();
7700 return sdebug_add_host_helper(ph_idx);
7703 static void sdebug_do_remove_host(bool the_end)
7706 struct sdebug_host_info *sdbg_host = NULL;
7707 struct sdebug_host_info *sdbg_host2;
7709 mutex_lock(&sdebug_host_list_mutex);
7710 if (!list_empty(&sdebug_host_list)) {
7711 sdbg_host = list_entry(sdebug_host_list.prev,
7712 struct sdebug_host_info, host_list);
7713 idx = sdbg_host->si_idx;
7715 if (!the_end && idx >= 0) {
7718 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7719 if (sdbg_host2 == sdbg_host)
7721 if (idx == sdbg_host2->si_idx) {
7727 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7728 if (idx == sdeb_most_recent_idx)
7729 --sdeb_most_recent_idx;
7733 list_del(&sdbg_host->host_list);
7734 mutex_unlock(&sdebug_host_list_mutex);
7739 device_unregister(&sdbg_host->dev);
7743 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7745 struct sdebug_dev_info *devip = sdev->hostdata;
7750 mutex_lock(&sdebug_host_list_mutex);
7751 block_unblock_all_queues(true);
7753 if (qdepth > SDEBUG_CANQUEUE) {
7754 qdepth = SDEBUG_CANQUEUE;
7755 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7756 qdepth, SDEBUG_CANQUEUE);
7760 if (qdepth != sdev->queue_depth)
7761 scsi_change_queue_depth(sdev, qdepth);
7763 block_unblock_all_queues(false);
7764 mutex_unlock(&sdebug_host_list_mutex);
7766 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7767 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7769 return sdev->queue_depth;
7772 static bool fake_timeout(struct scsi_cmnd *scp)
7774 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7775 if (sdebug_every_nth < -1)
7776 sdebug_every_nth = -1;
7777 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7778 return true; /* ignore command causing timeout */
7779 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7780 scsi_medium_access_command(scp))
7781 return true; /* time out reads and writes */
7786 /* Response to TUR or media access command when device stopped */
7787 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7791 ktime_t now_ts = ktime_get_boottime();
7792 struct scsi_device *sdp = scp->device;
7794 stopped_state = atomic_read(&devip->stopped);
7795 if (stopped_state == 2) {
7796 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7797 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7798 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7799 /* tur_ms_to_ready timer expired */
7800 atomic_set(&devip->stopped, 0);
7804 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7806 sdev_printk(KERN_INFO, sdp,
7807 "%s: Not ready: in process of becoming ready\n", my_name);
7808 if (scp->cmnd[0] == TEST_UNIT_READY) {
7809 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7811 if (diff_ns <= tur_nanosecs_to_ready)
7812 diff_ns = tur_nanosecs_to_ready - diff_ns;
7814 diff_ns = tur_nanosecs_to_ready;
7815 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7816 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7817 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7819 return check_condition_result;
7822 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7824 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7826 return check_condition_result;
7829 static void sdebug_map_queues(struct Scsi_Host *shost)
7833 if (shost->nr_hw_queues == 1)
7836 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7837 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7841 if (i == HCTX_TYPE_DEFAULT)
7842 map->nr_queues = submit_queues - poll_queues;
7843 else if (i == HCTX_TYPE_POLL)
7844 map->nr_queues = poll_queues;
7846 if (!map->nr_queues) {
7847 BUG_ON(i == HCTX_TYPE_DEFAULT);
7851 map->queue_offset = qoff;
7852 blk_mq_map_queues(map);
7854 qoff += map->nr_queues;
7858 struct sdebug_blk_mq_poll_data {
7859 unsigned int queue_num;
7860 int *num_entries;
7861 };
7863 /*
7864 * We don't handle aborted commands here, but it does not seem possible to have
7865 * aborted polled commands from schedule_resp().
7866 */
7867 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7869 struct sdebug_blk_mq_poll_data *data = opaque;
7870 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7871 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7872 struct sdebug_defer *sd_dp;
7873 u32 unique_tag = blk_mq_unique_tag(rq);
7874 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7875 struct sdebug_queued_cmd *sqcp;
7876 unsigned long flags;
7877 int queue_num = data->queue_num;
7880 /* We're only interested in one queue for this iteration */
7881 if (hwq != queue_num)
7884 /* Subsequent checks would fail if this failed, but check anyway */
7885 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7888 time = ktime_get_boottime();
7890 spin_lock_irqsave(&sdsc->lock, flags);
7891 sqcp = TO_QUEUED_CMD(cmd);
7893 spin_unlock_irqrestore(&sdsc->lock, flags);
7897 sd_dp = &sqcp->sd_dp;
7898 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7899 spin_unlock_irqrestore(&sdsc->lock, flags);
7903 if (time < sd_dp->cmpl_ts) {
7904 spin_unlock_irqrestore(&sdsc->lock, flags);
7908 ASSIGN_QUEUED_CMD(cmd, NULL);
7909 spin_unlock_irqrestore(&sdsc->lock, flags);
7911 if (sdebug_statistics) {
7912 atomic_inc(&sdebug_completions);
7913 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7914 atomic_inc(&sdebug_miss_cpus);
7917 sdebug_free_queued_cmd(sqcp);
7919 scsi_done(cmd); /* callback to mid level */
7920 (*data->num_entries)++;
7924 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7926 int num_entries = 0;
7927 struct sdebug_blk_mq_poll_data data = {
7928 .queue_num = queue_num,
7929 .num_entries = &num_entries,
7932 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7935 if (num_entries > 0)
7936 atomic_add(num_entries, &sdeb_mq_poll_count);
7940 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
7942 struct scsi_device *sdp = cmnd->device;
7943 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7944 struct sdebug_err_inject *err;
7945 unsigned char *cmd = cmnd->cmnd;
7952 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7953 if (err->type == ERR_TMOUT_CMD &&
7954 (err->cmd == cmd[0] || err->cmd == 0xff)) {
7968 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
7970 struct scsi_device *sdp = cmnd->device;
7971 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7972 struct sdebug_err_inject *err;
7973 unsigned char *cmd = cmnd->cmnd;
7980 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7981 if (err->type == ERR_FAIL_QUEUE_CMD &&
7982 (err->cmd == cmd[0] || err->cmd == 0xff)) {
7983 ret = err->cnt ? err->queuecmd_ret : 0;
7996 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
7997 struct sdebug_err_inject *info)
7999 struct scsi_device *sdp = cmnd->device;
8000 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8001 struct sdebug_err_inject *err;
8002 unsigned char *cmd = cmnd->cmnd;
8010 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8011 if (err->type == ERR_FAIL_CMD &&
8012 (err->cmd == cmd[0] || err->cmd == 0xff)) {
8030 mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8031 result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8033 *retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8038 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8039 struct scsi_cmnd *scp)
8042 struct scsi_device *sdp = scp->device;
8043 const struct opcode_info_t *oip;
8044 const struct opcode_info_t *r_oip;
8045 struct sdebug_dev_info *devip;
8046 u8 *cmd = scp->cmnd;
8047 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8048 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8051 u64 lun_index = sdp->lun & 0x3FFF;
8058 struct sdebug_err_inject err;
8060 scsi_set_resid(scp, 0);
8061 if (sdebug_statistics) {
8062 atomic_inc(&sdebug_cmnd_count);
8063 inject_now = inject_on_this_cmd();
8067 if (unlikely(sdebug_verbose &&
8068 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8073 sb = (int)sizeof(b);
8075 strcpy(b, "too long, over 32 bytes");
8077 for (k = 0, n = 0; k < len && n < sb; ++k)
8078 n += scnprintf(b + n, sb - n, "%02x ",
8081 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8082 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8084 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8085 return SCSI_MLQUEUE_HOST_BUSY;
8086 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8087 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8090 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
8091 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
8092 devip = (struct sdebug_dev_info *)sdp->hostdata;
8093 if (unlikely(!devip)) {
8094 devip = find_build_dev_info(sdp);
8099 if (sdebug_timeout_cmd(scp)) {
8100 scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8104 ret = sdebug_fail_queue_cmd(scp);
8106 scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8111 if (sdebug_fail_cmd(scp, &ret, &err)) {
8112 scmd_printk(KERN_INFO, scp,
8113 "fail command 0x%x with hostbyte=0x%x, "
8114 "driverbyte=0x%x, statusbyte=0x%x, "
8115 "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8116 opcode, err.host_byte, err.driver_byte,
8117 err.status_byte, err.sense_key, err.asc, err.asq);
8121 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8122 atomic_set(&sdeb_inject_pending, 1);
8124 na = oip->num_attached;
8126 if (na) { /* multiple commands with this opcode */
8128 if (FF_SA & r_oip->flags) {
8129 if (F_SA_LOW & oip->flags)
8132 sa = get_unaligned_be16(cmd + 8);
8133 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8134 if (opcode == oip->opcode && sa == oip->sa)
8137 } else { /* since no service action only check opcode */
8138 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8139 if (opcode == oip->opcode)
8144 if (F_SA_LOW & r_oip->flags)
8145 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8146 else if (F_SA_HIGH & r_oip->flags)
8147 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8149 mk_sense_invalid_opcode(scp);
8152 } /* else (when na==0) we assume the oip is a match */
8154 if (unlikely(F_INV_OP & flags)) {
8155 mk_sense_invalid_opcode(scp);
8158 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8160 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
8161 my_name, opcode, " supported for wlun");
8162 mk_sense_invalid_opcode(scp);
8165 if (unlikely(sdebug_strict)) { /* check cdb against mask */
8169 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8170 rem = ~oip->len_mask[k] & cmd[k];
8172 for (j = 7; j >= 0; --j, rem <<= 1) {
8176 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8181 if (unlikely(!(F_SKIP_UA & flags) &&
8182 find_first_bit(devip->uas_bm,
8183 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8184 errsts = make_ua(scp, devip);
8188 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8189 atomic_read(&devip->stopped))) {
8190 errsts = resp_not_ready(scp, devip);
8194 if (sdebug_fake_rw && (F_FAKE_RW & flags))
8196 if (unlikely(sdebug_every_nth)) {
8197 if (fake_timeout(scp))
8198 return 0; /* ignore command: make trouble */
8200 if (likely(oip->pfp))
8201 pfp = oip->pfp; /* calls a resp_* function */
8203 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
8206 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
8207 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8208 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8209 sdebug_ndelay > 10000)) {
8210 /*
8211 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8212 * for Start Stop Unit (SSU) want at least 1 second delay and
8213 * if sdebug_jdelay>1 want a long delay of that many seconds.
8214 * For Synchronize Cache want 1/20 of SSU's delay.
8215 */
8216 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8217 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8219 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8220 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8222 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8225 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8227 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8230 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8232 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8234 spin_lock_init(&sdsc->lock);
8239 static struct scsi_host_template sdebug_driver_template = {
8240 .show_info = scsi_debug_show_info,
8241 .write_info = scsi_debug_write_info,
8242 .proc_name = sdebug_proc_name,
8243 .name = "SCSI DEBUG",
8244 .info = scsi_debug_info,
8245 .slave_alloc = scsi_debug_slave_alloc,
8246 .slave_configure = scsi_debug_slave_configure,
8247 .slave_destroy = scsi_debug_slave_destroy,
8248 .ioctl = scsi_debug_ioctl,
8249 .queuecommand = scsi_debug_queuecommand,
8250 .change_queue_depth = sdebug_change_qdepth,
8251 .map_queues = sdebug_map_queues,
8252 .mq_poll = sdebug_blk_mq_poll,
8253 .eh_abort_handler = scsi_debug_abort,
8254 .eh_device_reset_handler = scsi_debug_device_reset,
8255 .eh_target_reset_handler = scsi_debug_target_reset,
8256 .eh_bus_reset_handler = scsi_debug_bus_reset,
8257 .eh_host_reset_handler = scsi_debug_host_reset,
8258 .can_queue = SDEBUG_CANQUEUE,
8260 .sg_tablesize = SG_MAX_SEGMENTS,
8261 .cmd_per_lun = DEF_CMD_PER_LUN,
8263 .max_segment_size = -1U,
8264 .module = THIS_MODULE,
8265 .track_queue_depth = 1,
8266 .cmd_size = sizeof(struct sdebug_scsi_cmd),
8267 .init_cmd_priv = sdebug_init_cmd_priv,
8268 .target_alloc = sdebug_target_alloc,
8269 .target_destroy = sdebug_target_destroy,
8272 static int sdebug_driver_probe(struct device *dev)
8275 struct sdebug_host_info *sdbg_host;
8276 struct Scsi_Host *hpnt;
8279 sdbg_host = dev_to_sdebug_host(dev);
8281 sdebug_driver_template.can_queue = sdebug_max_queue;
8282 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
8283 if (!sdebug_clustering)
8284 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
8286 hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8288 pr_err("scsi_host_alloc failed\n");
8292 if (submit_queues > nr_cpu_ids) {
8293 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8294 my_name, submit_queues, nr_cpu_ids);
8295 submit_queues = nr_cpu_ids;
8297 /*
8298 * Decide whether to tell scsi subsystem that we want mq. The
8299 * following should give the same answer for each host.
8300 */
8301 hpnt->nr_hw_queues = submit_queues;
8302 if (sdebug_host_max_queue)
8303 hpnt->host_tagset = 1;
8305 /* poll queues are possible for nr_hw_queues > 1 */
8306 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8307 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8308 my_name, poll_queues, hpnt->nr_hw_queues);
8312 /*
8313 * Poll queues don't need interrupts, but we need at least one I/O queue
8314 * left over for non-polled I/O.
8315 * If condition not met, trim poll_queues to 1 (just for simplicity).
8316 */
8317 if (poll_queues >= submit_queues) {
8318 if (submit_queues < 3)
8319 pr_warn("%s: trim poll_queues to 1\n", my_name);
8321 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8322 my_name, submit_queues - 1);
8328 sdbg_host->shost = hpnt;
8329 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8330 hpnt->max_id = sdebug_num_tgts + 1;
8332 hpnt->max_id = sdebug_num_tgts;
8333 /* = sdebug_max_luns; */
8334 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8338 switch (sdebug_dif) {
8340 case T10_PI_TYPE1_PROTECTION:
8341 hprot = SHOST_DIF_TYPE1_PROTECTION;
8343 hprot |= SHOST_DIX_TYPE1_PROTECTION;
8346 case T10_PI_TYPE2_PROTECTION:
8347 hprot = SHOST_DIF_TYPE2_PROTECTION;
8349 hprot |= SHOST_DIX_TYPE2_PROTECTION;
8352 case T10_PI_TYPE3_PROTECTION:
8353 hprot = SHOST_DIF_TYPE3_PROTECTION;
8355 hprot |= SHOST_DIX_TYPE3_PROTECTION;
8360 hprot |= SHOST_DIX_TYPE0_PROTECTION;
8364 scsi_host_set_prot(hpnt, hprot);
8366 if (have_dif_prot || sdebug_dix)
8367 pr_info("host protection%s%s%s%s%s%s%s\n",
8368 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8369 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8370 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8371 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8372 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8373 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8374 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8376 if (sdebug_guard == 1)
8377 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8379 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8381 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8382 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8383 if (sdebug_every_nth) /* need stats counters for every_nth */
8384 sdebug_statistics = true;
8385 error = scsi_add_host(hpnt, &sdbg_host->dev);
8387 pr_err("scsi_add_host failed\n");
8389 scsi_host_put(hpnt);
8391 scsi_scan_host(hpnt);
8397 static void sdebug_driver_remove(struct device *dev)
8399 struct sdebug_host_info *sdbg_host;
8400 struct sdebug_dev_info *sdbg_devinfo, *tmp;
8402 sdbg_host = dev_to_sdebug_host(dev);
8404 scsi_remove_host(sdbg_host->shost);
8406 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8408 list_del(&sdbg_devinfo->dev_list);
8409 kfree(sdbg_devinfo->zstate);
8410 kfree(sdbg_devinfo);
8413 scsi_host_put(sdbg_host->shost);
8416 static struct bus_type pseudo_lld_bus = {
8417 .name = "pseudo",
8418 .probe = sdebug_driver_probe,
8419 .remove = sdebug_driver_remove,
8420 .drv_groups = sdebug_drv_groups,