1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
48 #include <net/checksum.h>
50 #include <asm/unaligned.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
62 #include "scsi_logging.h"
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
68 #define MY_NAME "scsi_debug"
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define LOGICAL_UNIT_NOT_READY 0x4
73 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
74 #define UNRECOVERED_READ_ERR 0x11
75 #define PARAMETER_LIST_LENGTH_ERR 0x1a
76 #define INVALID_OPCODE 0x20
77 #define LBA_OUT_OF_RANGE 0x21
78 #define INVALID_FIELD_IN_CDB 0x24
79 #define INVALID_FIELD_IN_PARAM_LIST 0x26
80 #define WRITE_PROTECTED 0x27
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define TARGET_CHANGED_ASC 0x3f
84 #define LUNS_CHANGED_ASCQ 0x0e
85 #define INSUFF_RES_ASC 0x55
86 #define INSUFF_RES_ASCQ 0x3
87 #define POWER_ON_RESET_ASCQ 0x0
88 #define POWER_ON_OCCURRED_ASCQ 0x1
89 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
90 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
91 #define CAPACITY_CHANGED_ASCQ 0x9
92 #define SAVING_PARAMS_UNSUP 0x39
93 #define TRANSPORT_PROBLEM 0x4b
94 #define THRESHOLD_EXCEEDED 0x5d
95 #define LOW_POWER_COND_ON 0x5e
96 #define MISCOMPARE_VERIFY_ASC 0x1d
97 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
98 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
99 #define WRITE_ERROR_ASC 0xc
100 #define UNALIGNED_WRITE_ASCQ 0x4
101 #define WRITE_BOUNDARY_ASCQ 0x5
102 #define READ_INVDATA_ASCQ 0x6
103 #define READ_BOUNDARY_ASCQ 0x7
104 #define ATTEMPT_ACCESS_GAP 0x9
105 #define INSUFF_ZONE_ASCQ 0xe
107 /* Additional Sense Code Qualifier (ASCQ) */
108 #define ACK_NAK_TO 0x3
110 /* Default values for driver parameters */
111 #define DEF_NUM_HOST 1
112 #define DEF_NUM_TGTS 1
113 #define DEF_MAX_LUNS 1
114 /* With these defaults, this driver will make 1 host with 1 target
115 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
118 #define DEF_CDB_LEN 10
119 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
120 #define DEF_DEV_SIZE_PRE_INIT 0
121 #define DEF_DEV_SIZE_MB 8
122 #define DEF_ZBC_DEV_SIZE_MB 128
125 #define DEF_PER_HOST_STORE false
126 #define DEF_D_SENSE 0
127 #define DEF_EVERY_NTH 0
128 #define DEF_FAKE_RW 0
130 #define DEF_HOST_LOCK 0
133 #define DEF_LBPWS10 0
135 #define DEF_LOWEST_ALIGNED 0
136 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
137 #define DEF_NO_LUN_0 0
138 #define DEF_NUM_PARTS 0
140 #define DEF_OPT_BLKS 1024
141 #define DEF_PHYSBLK_EXP 0
142 #define DEF_OPT_XFERLEN_EXP 0
143 #define DEF_PTYPE TYPE_DISK
144 #define DEF_RANDOM false
145 #define DEF_REMOVABLE false
146 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
147 #define DEF_SECTOR_SIZE 512
148 #define DEF_UNMAP_ALIGNMENT 0
149 #define DEF_UNMAP_GRANULARITY 1
150 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
151 #define DEF_UNMAP_MAX_DESC 256
152 #define DEF_VIRTUAL_GB 0
153 #define DEF_VPD_USE_HOSTNO 1
154 #define DEF_WRITESAME_LENGTH 0xFFFF
156 #define DEF_STATISTICS false
157 #define DEF_SUBMIT_QUEUES 1
158 #define DEF_TUR_MS_TO_READY 0
159 #define DEF_UUID_CTL 0
160 #define JDELAY_OVERRIDDEN -9999
162 /* Default parameters for ZBC drives */
163 #define DEF_ZBC_ZONE_SIZE_MB 128
164 #define DEF_ZBC_MAX_OPEN_ZONES 8
165 #define DEF_ZBC_NR_CONV_ZONES 1
167 #define SDEBUG_LUN_0_VAL 0
169 /* bit mask values for sdebug_opts */
170 #define SDEBUG_OPT_NOISE 1
171 #define SDEBUG_OPT_MEDIUM_ERR 2
172 #define SDEBUG_OPT_TIMEOUT 4
173 #define SDEBUG_OPT_RECOVERED_ERR 8
174 #define SDEBUG_OPT_TRANSPORT_ERR 16
175 #define SDEBUG_OPT_DIF_ERR 32
176 #define SDEBUG_OPT_DIX_ERR 64
177 #define SDEBUG_OPT_MAC_TIMEOUT 128
178 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
179 #define SDEBUG_OPT_Q_NOISE 0x200
180 #define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
181 #define SDEBUG_OPT_RARE_TSF 0x800
182 #define SDEBUG_OPT_N_WCE 0x1000
183 #define SDEBUG_OPT_RESET_NOISE 0x2000
184 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
185 #define SDEBUG_OPT_HOST_BUSY 0x8000
186 #define SDEBUG_OPT_CMD_ABORT 0x10000
187 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
188 SDEBUG_OPT_RESET_NOISE)
189 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
190 SDEBUG_OPT_TRANSPORT_ERR | \
191 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
192 SDEBUG_OPT_SHORT_TRANSFER | \
193 SDEBUG_OPT_HOST_BUSY | \
194 SDEBUG_OPT_CMD_ABORT)
195 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
196 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
198 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
199 * priority order. In the subset implemented here lower numbers have higher
200 * priority. The UA numbers should be a sequence starting from 0 with
201 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
202 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
203 #define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
204 #define SDEBUG_UA_BUS_RESET 2
205 #define SDEBUG_UA_MODE_CHANGED 3
206 #define SDEBUG_UA_CAPACITY_CHANGED 4
207 #define SDEBUG_UA_LUNS_CHANGED 5
208 #define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
209 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
210 #define SDEBUG_NUM_UAS 8
212 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
213 * sector on read commands: */
214 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
215 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
217 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
218 * (for response) per submit queue at one time. Can be reduced by max_queue
219 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
220 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
221 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
222 * but cannot exceed SDEBUG_CANQUEUE .
224 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
225 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
226 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
228 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
229 #define F_D_IN 1 /* Data-in command (e.g. READ) */
230 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
231 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
233 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
234 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
235 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
236 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
237 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
238 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
239 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
240 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
241 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
242 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
244 /* Useful combinations of the above flags */
245 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
246 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
247 #define FF_SA (F_SA_HIGH | F_SA_LOW)
248 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
250 #define SDEBUG_MAX_PARTS 4
252 #define SDEBUG_MAX_CMD_LEN 32
254 #define SDEB_XA_NOT_IN_USE XA_MARK_1
256 static struct kmem_cache *queued_cmd_cache;
258 #define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
259 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
261 /* Zone types (zbcr05 table 25) */
266 /* ZBC_ZTYPE_SOBR = 0x4, */
270 /* enumeration names taken from table 26, zbcr05 */
272 ZBC_NOT_WRITE_POINTER = 0x0,
274 ZC2_IMPLICIT_OPEN = 0x2,
275 ZC3_EXPLICIT_OPEN = 0x3,
282 struct sdeb_zone_state { /* ZBC: per zone state */
283 enum sdebug_z_type z_type;
284 enum sdebug_z_cond z_cond;
285 bool z_non_seq_resource;
291 enum sdebug_err_type {
292 ERR_TMOUT_CMD = 0, /* make specific scsi command timeout */
293 ERR_FAIL_QUEUE_CMD = 1, /* make specific scsi command's */
294 /* queuecmd return failed */
295 ERR_FAIL_CMD = 2, /* make specific scsi command's */
296 /* queuecmd return succeed but */
297 /* with errors set in scsi_cmnd */
298 ERR_ABORT_CMD_FAILED = 3, /* control return FAILED from */
299 /* scsi_debug_abort() */
300 ERR_LUN_RESET_FAILED = 4, /* control return FAILED from */
301 /* scsi_debug_device_reseLUN_RESET_FAILEDt() */
304 struct sdebug_err_inject {
306 struct list_head list;
313 * For ERR_FAIL_QUEUE_CMD
321 unsigned char host_byte;
322 unsigned char driver_byte;
323 unsigned char status_byte;
324 unsigned char sense_key;
331 struct sdebug_dev_info {
332 struct list_head dev_list;
333 unsigned int channel;
337 struct sdebug_host_info *sdbg_host;
338 unsigned long uas_bm[1];
339 atomic_t stopped; /* 1: by SSU, 2: device start */
342 /* For ZBC devices */
346 unsigned int zsize_shift;
347 unsigned int nr_zones;
348 unsigned int nr_conv_zones;
349 unsigned int nr_seq_zones;
350 unsigned int nr_imp_open;
351 unsigned int nr_exp_open;
352 unsigned int nr_closed;
353 unsigned int max_open;
354 ktime_t create_ts; /* time since bootup that this device was created */
355 struct sdeb_zone_state *zstate;
357 struct dentry *debugfs_entry;
358 struct spinlock list_lock;
359 struct list_head inject_err_list;
362 struct sdebug_target_info {
364 struct dentry *debugfs_entry;
367 struct sdebug_host_info {
368 struct list_head host_list;
369 int si_idx; /* sdeb_store_info (per host) xarray index */
370 struct Scsi_Host *shost;
372 struct list_head dev_info_list;
375 /* There is an xarray of pointers to this struct's objects, one per host */
376 struct sdeb_store_info {
377 rwlock_t macc_lck; /* for atomic media access on this store */
378 u8 *storep; /* user data storage (ram) */
379 struct t10_pi_tuple *dif_storep; /* protection info */
380 void *map_storep; /* provisioning map */
383 #define dev_to_sdebug_host(d) \
384 container_of(d, struct sdebug_host_info, dev)
386 #define shost_to_sdebug_host(shost) \
387 dev_to_sdebug_host(shost->dma_dev)
389 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
390 SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
392 struct sdebug_defer {
394 struct execute_work ew;
395 ktime_t cmpl_ts;/* time since boot to complete this cmd */
397 bool aborted; /* true when blk_abort_request() already called */
398 enum sdeb_defer_type defer_t;
401 struct sdebug_queued_cmd {
402 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
403 * instance indicates this slot is in use.
405 struct sdebug_defer sd_dp;
406 struct scsi_cmnd *scmd;
409 struct sdebug_scsi_cmd {
413 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
414 static atomic_t sdebug_completions; /* count of deferred completions */
415 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
416 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
417 static atomic_t sdeb_inject_pending;
418 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
420 struct opcode_info_t {
421 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
422 /* for terminating element */
423 u8 opcode; /* if num_attached > 0, preferred */
424 u16 sa; /* service action */
425 u32 flags; /* OR-ed set of SDEB_F_* */
426 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
427 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
428 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
429 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
432 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
433 enum sdeb_opcode_index {
434 SDEB_I_INVALID_OPCODE = 0,
436 SDEB_I_REPORT_LUNS = 2,
437 SDEB_I_REQUEST_SENSE = 3,
438 SDEB_I_TEST_UNIT_READY = 4,
439 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
440 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
441 SDEB_I_LOG_SENSE = 7,
442 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
443 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
444 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
445 SDEB_I_START_STOP = 11,
446 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
447 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
448 SDEB_I_MAINT_IN = 14,
449 SDEB_I_MAINT_OUT = 15,
450 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
451 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
452 SDEB_I_RESERVE = 18, /* 6, 10 */
453 SDEB_I_RELEASE = 19, /* 6, 10 */
454 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
455 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
456 SDEB_I_ATA_PT = 22, /* 12, 16 */
457 SDEB_I_SEND_DIAG = 23,
459 SDEB_I_WRITE_BUFFER = 25,
460 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
461 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
462 SDEB_I_COMP_WRITE = 28,
463 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
464 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
465 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
466 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
470 static const unsigned char opcode_ind_arr[256] = {
471 /* 0x0; 0x0->0x1f: 6 byte cdbs */
472 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
474 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
475 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
477 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
478 SDEB_I_ALLOW_REMOVAL, 0,
479 /* 0x20; 0x20->0x3f: 10 byte cdbs */
480 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
481 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
482 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
483 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
484 /* 0x40; 0x40->0x5f: 10 byte cdbs */
485 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
486 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
487 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
489 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
490 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
491 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
492 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
493 0, SDEB_I_VARIABLE_LEN,
494 /* 0x80; 0x80->0x9f: 16 byte cdbs */
495 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
496 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
497 0, 0, 0, SDEB_I_VERIFY,
498 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
499 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
500 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
501 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
502 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
503 SDEB_I_MAINT_OUT, 0, 0, 0,
504 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
505 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
506 0, 0, 0, 0, 0, 0, 0, 0,
507 0, 0, 0, 0, 0, 0, 0, 0,
508 /* 0xc0; 0xc0->0xff: vendor specific */
509 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
510 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
511 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
512 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
516 * The following "response" functions return the SCSI mid-level's 4 byte
517 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
518 * command completion, they can mask their return value with
519 * SDEG_RES_IMMED_MASK .
521 #define SDEG_RES_IMMED_MASK 0x40000000
523 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
524 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
525 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
526 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
527 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
528 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
529 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
530 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
531 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
532 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
533 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
534 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
535 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
536 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
537 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
538 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
539 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
540 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
541 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
542 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
543 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
544 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
545 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
546 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
547 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
548 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
549 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
550 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
551 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
553 static int sdebug_do_add_host(bool mk_new_store);
554 static int sdebug_add_host_helper(int per_host_idx);
555 static void sdebug_do_remove_host(bool the_end);
556 static int sdebug_add_store(void);
557 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
558 static void sdebug_erase_all_stores(bool apart_from_first);
560 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
563 * The following are overflow arrays for cdbs that "hit" the same index in
564 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
565 * should be placed in opcode_info_arr[], the others should be placed here.
567 static const struct opcode_info_t msense_iarr[] = {
568 {0, 0x1a, 0, F_D_IN, NULL, NULL,
569 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
572 static const struct opcode_info_t mselect_iarr[] = {
573 {0, 0x15, 0, F_D_OUT, NULL, NULL,
574 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
577 static const struct opcode_info_t read_iarr[] = {
578 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
579 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
581 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
582 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
583 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
584 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
588 static const struct opcode_info_t write_iarr[] = {
589 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
590 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
592 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
593 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
595 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
596 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 0xbf, 0xc7, 0, 0, 0, 0} },
600 static const struct opcode_info_t verify_iarr[] = {
601 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
602 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
606 static const struct opcode_info_t sa_in_16_iarr[] = {
607 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
608 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
612 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
613 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
614 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
615 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
616 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
617 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
618 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
621 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
622 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
623 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
624 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
625 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
626 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
627 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
630 static const struct opcode_info_t write_same_iarr[] = {
631 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
632 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
633 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
636 static const struct opcode_info_t reserve_iarr[] = {
637 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
638 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641 static const struct opcode_info_t release_iarr[] = {
642 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
643 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
646 static const struct opcode_info_t sync_cache_iarr[] = {
647 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
648 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
649 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
652 static const struct opcode_info_t pre_fetch_iarr[] = {
653 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
654 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
655 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
658 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
659 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
660 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
662 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
663 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
664 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
665 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
666 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
670 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
671 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
672 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
673 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
677 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
678 * plus the terminating elements for logic that scans this table such as
679 * REPORT SUPPORTED OPERATION CODES. */
680 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
682 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
683 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
684 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
685 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
686 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
687 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
688 0, 0} }, /* REPORT LUNS */
689 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
690 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
691 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
692 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
694 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
695 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
696 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
697 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
698 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
699 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
700 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
701 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
703 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
704 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
706 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
707 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
708 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
710 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
711 resp_write_dt0, write_iarr, /* WRITE(16) */
712 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
713 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
714 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
715 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
716 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
717 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
718 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
719 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
720 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
721 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
722 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
723 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
724 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
725 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
726 0xff, 0, 0xc7, 0, 0, 0, 0} },
728 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
729 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
730 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
731 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
732 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
734 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
735 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
736 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
738 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
739 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
740 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
742 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
743 NULL, release_iarr, /* RELEASE(10) <no response function> */
744 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
747 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
748 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
749 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
750 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
751 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
752 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
753 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
754 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
755 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
756 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
758 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
759 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
760 0, 0, 0, 0} }, /* WRITE_BUFFER */
761 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
762 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
763 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
765 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
766 resp_sync_cache, sync_cache_iarr,
767 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
768 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
769 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
770 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
771 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
772 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
773 resp_pre_fetch, pre_fetch_iarr,
774 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
775 0, 0, 0, 0} }, /* PRE-FETCH (10) */
778 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
779 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
780 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
781 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
782 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
783 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
784 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
785 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
787 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
788 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
791 static int sdebug_num_hosts;
792 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
793 static int sdebug_ato = DEF_ATO;
794 static int sdebug_cdb_len = DEF_CDB_LEN;
795 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
796 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
797 static int sdebug_dif = DEF_DIF;
798 static int sdebug_dix = DEF_DIX;
799 static int sdebug_dsense = DEF_D_SENSE;
800 static int sdebug_every_nth = DEF_EVERY_NTH;
801 static int sdebug_fake_rw = DEF_FAKE_RW;
802 static unsigned int sdebug_guard = DEF_GUARD;
803 static int sdebug_host_max_queue; /* per host */
804 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
805 static int sdebug_max_luns = DEF_MAX_LUNS;
806 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
807 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
808 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
809 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
810 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
811 static int sdebug_no_uld;
812 static int sdebug_num_parts = DEF_NUM_PARTS;
813 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
814 static int sdebug_opt_blks = DEF_OPT_BLKS;
815 static int sdebug_opts = DEF_OPTS;
816 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
817 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
818 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
819 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
820 static int sdebug_sector_size = DEF_SECTOR_SIZE;
821 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
822 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
823 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
824 static unsigned int sdebug_lbpu = DEF_LBPU;
825 static unsigned int sdebug_lbpws = DEF_LBPWS;
826 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
827 static unsigned int sdebug_lbprz = DEF_LBPRZ;
828 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
829 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
830 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
831 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
832 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
833 static int sdebug_uuid_ctl = DEF_UUID_CTL;
834 static bool sdebug_random = DEF_RANDOM;
835 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
836 static bool sdebug_removable = DEF_REMOVABLE;
837 static bool sdebug_clustering;
838 static bool sdebug_host_lock = DEF_HOST_LOCK;
839 static bool sdebug_strict = DEF_STRICT;
840 static bool sdebug_any_injecting_opt;
841 static bool sdebug_no_rwlock;
842 static bool sdebug_verbose;
843 static bool have_dif_prot;
844 static bool write_since_sync;
845 static bool sdebug_statistics = DEF_STATISTICS;
846 static bool sdebug_wp;
847 static bool sdebug_allow_restart;
852 } sdeb_zbc_model = BLK_ZONED_NONE;
853 static char *sdeb_zbc_model_s;
855 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
856 SAM_LUN_AM_FLAT = 0x1,
857 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
858 SAM_LUN_AM_EXTENDED = 0x3};
859 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
860 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
862 static unsigned int sdebug_store_sectors;
863 static sector_t sdebug_capacity; /* in sectors */
865 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
866 may still need them */
867 static int sdebug_heads; /* heads per disk */
868 static int sdebug_cylinders_per; /* cylinders per surface */
869 static int sdebug_sectors_per; /* sectors per cylinder */
871 static LIST_HEAD(sdebug_host_list);
872 static DEFINE_MUTEX(sdebug_host_list_mutex);
874 static struct xarray per_store_arr;
875 static struct xarray *per_store_ap = &per_store_arr;
876 static int sdeb_first_idx = -1; /* invalid index ==> none created */
877 static int sdeb_most_recent_idx = -1;
878 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
880 static unsigned long map_size;
881 static int num_aborts;
882 static int num_dev_resets;
883 static int num_target_resets;
884 static int num_bus_resets;
885 static int num_host_resets;
886 static int dix_writes;
887 static int dix_reads;
888 static int dif_errors;
890 /* ZBC global data */
891 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
892 static int sdeb_zbc_zone_cap_mb;
893 static int sdeb_zbc_zone_size_mb;
894 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
895 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
897 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
898 static int poll_queues; /* iouring iopoll interface.*/
900 static char sdebug_proc_name[] = MY_NAME;
901 static const char *my_name = MY_NAME;
903 static struct bus_type pseudo_lld_bus;
905 static struct device_driver sdebug_driverfs_driver = {
906 .name = sdebug_proc_name,
907 .bus = &pseudo_lld_bus,
910 static const int check_condition_result =
911 SAM_STAT_CHECK_CONDITION;
913 static const int illegal_condition_result =
914 (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
916 static const int device_qfull_result =
917 (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
919 static const int condition_met_result = SAM_STAT_CONDITION_MET;
921 static struct dentry *sdebug_debugfs_root;
923 static void sdebug_err_free(struct rcu_head *head)
925 struct sdebug_err_inject *inject =
926 container_of(head, typeof(*inject), rcu);
931 static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
933 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
934 struct sdebug_err_inject *err;
936 spin_lock(&devip->list_lock);
937 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
938 if (err->type == new->type && err->cmd == new->cmd) {
939 list_del_rcu(&err->list);
940 call_rcu(&err->rcu, sdebug_err_free);
944 list_add_tail_rcu(&new->list, &devip->inject_err_list);
945 spin_unlock(&devip->list_lock);
948 static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
950 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
951 struct sdebug_err_inject *err;
955 if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
960 spin_lock(&devip->list_lock);
961 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
962 if (err->type == type && err->cmd == cmd) {
963 list_del_rcu(&err->list);
964 call_rcu(&err->rcu, sdebug_err_free);
965 spin_unlock(&devip->list_lock);
970 spin_unlock(&devip->list_lock);
976 static int sdebug_error_show(struct seq_file *m, void *p)
978 struct scsi_device *sdev = (struct scsi_device *)m->private;
979 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
980 struct sdebug_err_inject *err;
982 seq_puts(m, "Type\tCount\tCommand\n");
985 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
988 case ERR_ABORT_CMD_FAILED:
989 case ERR_LUN_RESET_FAILED:
990 seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
994 case ERR_FAIL_QUEUE_CMD:
995 seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
996 err->cnt, err->cmd, err->queuecmd_ret);
1000 seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1001 err->type, err->cnt, err->cmd,
1002 err->host_byte, err->driver_byte,
1003 err->status_byte, err->sense_key,
1004 err->asc, err->asq);
1013 static int sdebug_error_open(struct inode *inode, struct file *file)
1015 return single_open(file, sdebug_error_show, inode->i_private);
1018 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1019 size_t count, loff_t *ppos)
1022 unsigned int inject_type;
1023 struct sdebug_err_inject *inject;
1024 struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1026 buf = kzalloc(count + 1, GFP_KERNEL);
1030 if (copy_from_user(buf, ubuf, count)) {
1036 return sdebug_err_remove(sdev, buf, count);
1038 if (sscanf(buf, "%d", &inject_type) != 1) {
1043 inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1049 switch (inject_type) {
1051 case ERR_ABORT_CMD_FAILED:
1052 case ERR_LUN_RESET_FAILED:
1053 if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1058 case ERR_FAIL_QUEUE_CMD:
1059 if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1060 &inject->cmd, &inject->queuecmd_ret) != 4)
1065 if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1066 &inject->type, &inject->cnt, &inject->cmd,
1067 &inject->host_byte, &inject->driver_byte,
1068 &inject->status_byte, &inject->sense_key,
1069 &inject->asc, &inject->asq) != 9)
1079 sdebug_err_add(sdev, inject);
1089 static const struct file_operations sdebug_error_fops = {
1090 .open = sdebug_error_open,
1092 .write = sdebug_error_write,
1093 .release = single_release,
1096 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1098 struct scsi_target *starget = (struct scsi_target *)m->private;
1099 struct sdebug_target_info *targetip =
1100 (struct sdebug_target_info *)starget->hostdata;
1103 seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1108 static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
1110 return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
1113 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1114 const char __user *ubuf, size_t count, loff_t *ppos)
1117 struct scsi_target *starget =
1118 (struct scsi_target *)file->f_inode->i_private;
1119 struct sdebug_target_info *targetip =
1120 (struct sdebug_target_info *)starget->hostdata;
1123 ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1124 return ret < 0 ? ret : count;
1129 static const struct file_operations sdebug_target_reset_fail_fops = {
1130 .open = sdebug_target_reset_fail_open,
1132 .write = sdebug_target_reset_fail_write,
1133 .release = single_release,
1136 static int sdebug_target_alloc(struct scsi_target *starget)
1138 struct sdebug_target_info *targetip;
1140 targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1144 targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1145 sdebug_debugfs_root);
1147 debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1148 &sdebug_target_reset_fail_fops);
1150 starget->hostdata = targetip;
1155 static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
1157 struct sdebug_target_info *targetip = data;
1159 debugfs_remove(targetip->debugfs_entry);
1163 static void sdebug_target_destroy(struct scsi_target *starget)
1165 struct sdebug_target_info *targetip;
1167 targetip = (struct sdebug_target_info *)starget->hostdata;
1169 starget->hostdata = NULL;
1170 async_schedule(sdebug_tartget_cleanup_async, targetip);
1174 /* Only do the extra work involved in logical block provisioning if one or
1175 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1176 * real reads and writes (i.e. not skipping them for speed).
1178 static inline bool scsi_debug_lbp(void)
1180 return 0 == sdebug_fake_rw &&
1181 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1184 static void *lba2fake_store(struct sdeb_store_info *sip,
1185 unsigned long long lba)
1187 struct sdeb_store_info *lsip = sip;
1189 lba = do_div(lba, sdebug_store_sectors);
1190 if (!sip || !sip->storep) {
1192 lsip = xa_load(per_store_ap, 0); /* should never be NULL */
1194 return lsip->storep + lba * sdebug_sector_size;
1197 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1200 sector = sector_div(sector, sdebug_store_sectors);
1202 return sip->dif_storep + sector;
1205 static void sdebug_max_tgts_luns(void)
1207 struct sdebug_host_info *sdbg_host;
1208 struct Scsi_Host *hpnt;
1210 mutex_lock(&sdebug_host_list_mutex);
1211 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1212 hpnt = sdbg_host->shost;
1213 if ((hpnt->this_id >= 0) &&
1214 (sdebug_num_tgts > hpnt->this_id))
1215 hpnt->max_id = sdebug_num_tgts + 1;
1217 hpnt->max_id = sdebug_num_tgts;
1218 /* sdebug_max_luns; */
1219 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1221 mutex_unlock(&sdebug_host_list_mutex);
1224 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1226 /* Set in_bit to -1 to indicate no bit position of invalid field */
1227 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
1228 enum sdeb_cmd_data c_d,
1229 int in_byte, int in_bit)
1231 unsigned char *sbuff;
1235 sbuff = scp->sense_buffer;
1237 sdev_printk(KERN_ERR, scp->device,
1238 "%s: sense_buffer is NULL\n", __func__);
1241 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
1242 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
1243 scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
1244 memset(sks, 0, sizeof(sks));
1250 sks[0] |= 0x7 & in_bit;
1252 put_unaligned_be16(in_byte, sks + 1);
1253 if (sdebug_dsense) {
1257 sbuff[sl + 1] = 0x6;
1258 memcpy(sbuff + sl + 4, sks, 3);
1260 memcpy(sbuff + 15, sks, 3);
1262 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
1263 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
1264 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
1267 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1269 if (!scp->sense_buffer) {
1270 sdev_printk(KERN_ERR, scp->device,
1271 "%s: sense_buffer is NULL\n", __func__);
1274 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1276 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1279 sdev_printk(KERN_INFO, scp->device,
1280 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1281 my_name, key, asc, asq);
1284 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
1286 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
1289 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1292 if (sdebug_verbose) {
1294 sdev_printk(KERN_INFO, dev,
1295 "%s: BLKFLSBUF [0x1261]\n", __func__);
1296 else if (0x5331 == cmd)
1297 sdev_printk(KERN_INFO, dev,
1298 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1301 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1305 /* return -ENOTTY; // correct return but upsets fdisk */
1308 static void config_cdb_len(struct scsi_device *sdev)
1310 switch (sdebug_cdb_len) {
1311 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1312 sdev->use_10_for_rw = false;
1313 sdev->use_16_for_rw = false;
1314 sdev->use_10_for_ms = false;
1316 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1317 sdev->use_10_for_rw = true;
1318 sdev->use_16_for_rw = false;
1319 sdev->use_10_for_ms = false;
1321 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1322 sdev->use_10_for_rw = true;
1323 sdev->use_16_for_rw = false;
1324 sdev->use_10_for_ms = true;
1327 sdev->use_10_for_rw = false;
1328 sdev->use_16_for_rw = true;
1329 sdev->use_10_for_ms = true;
1331 case 32: /* No knobs to suggest this so same as 16 for now */
1332 sdev->use_10_for_rw = false;
1333 sdev->use_16_for_rw = true;
1334 sdev->use_10_for_ms = true;
1337 pr_warn("unexpected cdb_len=%d, force to 10\n",
1339 sdev->use_10_for_rw = true;
1340 sdev->use_16_for_rw = false;
1341 sdev->use_10_for_ms = false;
1342 sdebug_cdb_len = 10;
1347 static void all_config_cdb_len(void)
1349 struct sdebug_host_info *sdbg_host;
1350 struct Scsi_Host *shost;
1351 struct scsi_device *sdev;
1353 mutex_lock(&sdebug_host_list_mutex);
1354 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1355 shost = sdbg_host->shost;
1356 shost_for_each_device(sdev, shost) {
1357 config_cdb_len(sdev);
1360 mutex_unlock(&sdebug_host_list_mutex);
1363 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1365 struct sdebug_host_info *sdhp = devip->sdbg_host;
1366 struct sdebug_dev_info *dp;
1368 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1369 if ((devip->sdbg_host == dp->sdbg_host) &&
1370 (devip->target == dp->target)) {
1371 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1376 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1380 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1381 if (k != SDEBUG_NUM_UAS) {
1382 const char *cp = NULL;
1386 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1387 POWER_ON_RESET_ASCQ);
1389 cp = "power on reset";
1391 case SDEBUG_UA_POOCCUR:
1392 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1393 POWER_ON_OCCURRED_ASCQ);
1395 cp = "power on occurred";
1397 case SDEBUG_UA_BUS_RESET:
1398 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1403 case SDEBUG_UA_MODE_CHANGED:
1404 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1407 cp = "mode parameters changed";
1409 case SDEBUG_UA_CAPACITY_CHANGED:
1410 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1411 CAPACITY_CHANGED_ASCQ);
1413 cp = "capacity data changed";
1415 case SDEBUG_UA_MICROCODE_CHANGED:
1416 mk_sense_buffer(scp, UNIT_ATTENTION,
1418 MICROCODE_CHANGED_ASCQ);
1420 cp = "microcode has been changed";
1422 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1423 mk_sense_buffer(scp, UNIT_ATTENTION,
1425 MICROCODE_CHANGED_WO_RESET_ASCQ);
1427 cp = "microcode has been changed without reset";
1429 case SDEBUG_UA_LUNS_CHANGED:
1431 * SPC-3 behavior is to report a UNIT ATTENTION with
1432 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1433 * on the target, until a REPORT LUNS command is
1434 * received. SPC-4 behavior is to report it only once.
1435 * NOTE: sdebug_scsi_level does not use the same
1436 * values as struct scsi_device->scsi_level.
1438 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1439 clear_luns_changed_on_target(devip);
1440 mk_sense_buffer(scp, UNIT_ATTENTION,
1444 cp = "reported luns data has changed";
1447 pr_warn("unexpected unit attention code=%d\n", k);
1452 clear_bit(k, devip->uas_bm);
1454 sdev_printk(KERN_INFO, scp->device,
1455 "%s reports: Unit attention: %s\n",
1457 return check_condition_result;
1462 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1463 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1467 struct scsi_data_buffer *sdb = &scp->sdb;
1471 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1472 return DID_ERROR << 16;
1474 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1476 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1481 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1482 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1483 * calls, not required to write in ascending offset order. Assumes resid
1484 * set to scsi_bufflen() prior to any calls.
1486 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1487 int arr_len, unsigned int off_dst)
1489 unsigned int act_len, n;
1490 struct scsi_data_buffer *sdb = &scp->sdb;
1491 off_t skip = off_dst;
1493 if (sdb->length <= off_dst)
1495 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1496 return DID_ERROR << 16;
1498 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1499 arr, arr_len, skip);
1500 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1501 __func__, off_dst, scsi_bufflen(scp), act_len,
1502 scsi_get_resid(scp));
1503 n = scsi_bufflen(scp) - (off_dst + act_len);
1504 scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1508 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1509 * 'arr' or -1 if error.
1511 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1514 if (!scsi_bufflen(scp))
1516 if (scp->sc_data_direction != DMA_TO_DEVICE)
1519 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1523 static char sdebug_inq_vendor_id[9] = "Linux ";
1524 static char sdebug_inq_product_id[17] = "scsi_debug ";
1525 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1526 /* Use some locally assigned NAAs for SAS addresses. */
1527 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1528 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1529 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1531 /* Device identification VPD page. Returns number of bytes placed in arr */
1532 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1533 int target_dev_id, int dev_id_num,
1534 const char *dev_id_str, int dev_id_str_len,
1535 const uuid_t *lu_name)
1540 port_a = target_dev_id + 1;
1541 /* T10 vendor identifier field format (faked) */
1542 arr[0] = 0x2; /* ASCII */
1545 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1546 memcpy(&arr[12], sdebug_inq_product_id, 16);
1547 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1548 num = 8 + 16 + dev_id_str_len;
1551 if (dev_id_num >= 0) {
1552 if (sdebug_uuid_ctl) {
1553 /* Locally assigned UUID */
1554 arr[num++] = 0x1; /* binary (not necessarily sas) */
1555 arr[num++] = 0xa; /* PIV=0, lu, naa */
1558 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1560 memcpy(arr + num, lu_name, 16);
1563 /* NAA-3, Logical unit identifier (binary) */
1564 arr[num++] = 0x1; /* binary (not necessarily sas) */
1565 arr[num++] = 0x3; /* PIV=0, lu, naa */
1568 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1571 /* Target relative port number */
1572 arr[num++] = 0x61; /* proto=sas, binary */
1573 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1574 arr[num++] = 0x0; /* reserved */
1575 arr[num++] = 0x4; /* length */
1576 arr[num++] = 0x0; /* reserved */
1577 arr[num++] = 0x0; /* reserved */
1579 arr[num++] = 0x1; /* relative port A */
1581 /* NAA-3, Target port identifier */
1582 arr[num++] = 0x61; /* proto=sas, binary */
1583 arr[num++] = 0x93; /* piv=1, target port, naa */
1586 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1588 /* NAA-3, Target port group identifier */
1589 arr[num++] = 0x61; /* proto=sas, binary */
1590 arr[num++] = 0x95; /* piv=1, target port group id */
1595 put_unaligned_be16(port_group_id, arr + num);
1597 /* NAA-3, Target device identifier */
1598 arr[num++] = 0x61; /* proto=sas, binary */
1599 arr[num++] = 0xa3; /* piv=1, target device, naa */
1602 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1604 /* SCSI name string: Target device identifier */
1605 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1606 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1609 memcpy(arr + num, "naa.32222220", 12);
1611 snprintf(b, sizeof(b), "%08X", target_dev_id);
1612 memcpy(arr + num, b, 8);
1614 memset(arr + num, 0, 4);
1619 static unsigned char vpd84_data[] = {
1620 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1621 0x22,0x22,0x22,0x0,0xbb,0x1,
1622 0x22,0x22,0x22,0x0,0xbb,0x2,
1625 /* Software interface identification VPD page */
1626 static int inquiry_vpd_84(unsigned char *arr)
1628 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1629 return sizeof(vpd84_data);
1632 /* Management network addresses VPD page */
1633 static int inquiry_vpd_85(unsigned char *arr)
1636 const char *na1 = "https://www.kernel.org/config";
1637 const char *na2 = "http://www.kernel.org/log";
1640 arr[num++] = 0x1; /* lu, storage config */
1641 arr[num++] = 0x0; /* reserved */
1646 plen = ((plen / 4) + 1) * 4;
1647 arr[num++] = plen; /* length, null termianted, padded */
1648 memcpy(arr + num, na1, olen);
1649 memset(arr + num + olen, 0, plen - olen);
1652 arr[num++] = 0x4; /* lu, logging */
1653 arr[num++] = 0x0; /* reserved */
1658 plen = ((plen / 4) + 1) * 4;
1659 arr[num++] = plen; /* length, null terminated, padded */
1660 memcpy(arr + num, na2, olen);
1661 memset(arr + num + olen, 0, plen - olen);
1667 /* SCSI ports VPD page */
1668 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1673 port_a = target_dev_id + 1;
1674 port_b = port_a + 1;
1675 arr[num++] = 0x0; /* reserved */
1676 arr[num++] = 0x0; /* reserved */
1678 arr[num++] = 0x1; /* relative port 1 (primary) */
1679 memset(arr + num, 0, 6);
1682 arr[num++] = 12; /* length tp descriptor */
1683 /* naa-5 target port identifier (A) */
1684 arr[num++] = 0x61; /* proto=sas, binary */
1685 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1686 arr[num++] = 0x0; /* reserved */
1687 arr[num++] = 0x8; /* length */
1688 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1690 arr[num++] = 0x0; /* reserved */
1691 arr[num++] = 0x0; /* reserved */
1693 arr[num++] = 0x2; /* relative port 2 (secondary) */
1694 memset(arr + num, 0, 6);
1697 arr[num++] = 12; /* length tp descriptor */
1698 /* naa-5 target port identifier (B) */
1699 arr[num++] = 0x61; /* proto=sas, binary */
1700 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1701 arr[num++] = 0x0; /* reserved */
1702 arr[num++] = 0x8; /* length */
1703 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1710 static unsigned char vpd89_data[] = {
1711 /* from 4th byte */ 0,0,0,0,
1712 'l','i','n','u','x',' ',' ',' ',
1713 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1715 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1717 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1718 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1719 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1720 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1722 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1724 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1726 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1727 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1728 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1729 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1730 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1731 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1732 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1733 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1734 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1735 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1736 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1737 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1738 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1739 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1740 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1741 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1742 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1743 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1744 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1745 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1746 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1747 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1748 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1749 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1750 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1751 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1754 /* ATA Information VPD page */
1755 static int inquiry_vpd_89(unsigned char *arr)
1757 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1758 return sizeof(vpd89_data);
1762 static unsigned char vpdb0_data[] = {
1763 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1764 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1765 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1766 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1769 /* Block limits VPD page (SBC-3) */
1770 static int inquiry_vpd_b0(unsigned char *arr)
1774 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1776 /* Optimal transfer length granularity */
1777 if (sdebug_opt_xferlen_exp != 0 &&
1778 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1779 gran = 1 << sdebug_opt_xferlen_exp;
1781 gran = 1 << sdebug_physblk_exp;
1782 put_unaligned_be16(gran, arr + 2);
1784 /* Maximum Transfer Length */
1785 if (sdebug_store_sectors > 0x400)
1786 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1788 /* Optimal Transfer Length */
1789 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1792 /* Maximum Unmap LBA Count */
1793 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1795 /* Maximum Unmap Block Descriptor Count */
1796 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1799 /* Unmap Granularity Alignment */
1800 if (sdebug_unmap_alignment) {
1801 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1802 arr[28] |= 0x80; /* UGAVALID */
1805 /* Optimal Unmap Granularity */
1806 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1808 /* Maximum WRITE SAME Length */
1809 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1811 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1814 /* Block device characteristics VPD page (SBC-3) */
1815 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1817 memset(arr, 0, 0x3c);
1819 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1821 arr[3] = 5; /* less than 1.8" */
1826 /* Logical block provisioning VPD page (SBC-4) */
1827 static int inquiry_vpd_b2(unsigned char *arr)
1829 memset(arr, 0, 0x4);
1830 arr[0] = 0; /* threshold exponent */
1837 if (sdebug_lbprz && scsi_debug_lbp())
1838 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1839 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1840 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1841 /* threshold_percentage=0 */
1845 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1846 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1848 memset(arr, 0, 0x3c);
1849 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1851 * Set Optimal number of open sequential write preferred zones and
1852 * Optimal number of non-sequentially written sequential write
1853 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1854 * fields set to zero, apart from Max. number of open swrz_s field.
1856 put_unaligned_be32(0xffffffff, &arr[4]);
1857 put_unaligned_be32(0xffffffff, &arr[8]);
1858 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1859 put_unaligned_be32(devip->max_open, &arr[12]);
1861 put_unaligned_be32(0xffffffff, &arr[12]);
1862 if (devip->zcap < devip->zsize) {
1863 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1864 put_unaligned_be64(devip->zsize, &arr[20]);
1871 #define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */
1873 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1875 /* Block limits extension VPD page (SBC-4) */
1876 static int inquiry_vpd_b7(unsigned char *arrb4)
1878 memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1879 arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1880 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1881 return SDEBUG_BLE_LEN_AFTER_B4;
1884 #define SDEBUG_LONG_INQ_SZ 96
1885 #define SDEBUG_MAX_INQ_ARR_SZ 584
1887 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1889 unsigned char pq_pdt;
1891 unsigned char *cmd = scp->cmnd;
1894 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1896 alloc_len = get_unaligned_be16(cmd + 3);
1897 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1899 return DID_REQUEUE << 16;
1900 is_disk = (sdebug_ptype == TYPE_DISK);
1901 is_zbc = devip->zoned;
1902 is_disk_zbc = (is_disk || is_zbc);
1903 have_wlun = scsi_is_wlun(scp->device->lun);
1905 pq_pdt = TYPE_WLUN; /* present, wlun */
1906 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1907 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1909 pq_pdt = (sdebug_ptype & 0x1f);
1911 if (0x2 & cmd[1]) { /* CMDDT bit set */
1912 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1914 return check_condition_result;
1915 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1916 int lu_id_num, port_group_id, target_dev_id;
1919 int host_no = devip->sdbg_host->shost->host_no;
1922 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1923 (devip->channel & 0x7f);
1924 if (sdebug_vpd_use_hostno == 0)
1926 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1927 (devip->target * 1000) + devip->lun);
1928 target_dev_id = ((host_no + 1) * 2000) +
1929 (devip->target * 1000) - 3;
1930 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1931 if (0 == cmd[2]) { /* supported vital product data pages */
1933 arr[n++] = 0x0; /* this page */
1934 arr[n++] = 0x80; /* unit serial number */
1935 arr[n++] = 0x83; /* device identification */
1936 arr[n++] = 0x84; /* software interface ident. */
1937 arr[n++] = 0x85; /* management network addresses */
1938 arr[n++] = 0x86; /* extended inquiry */
1939 arr[n++] = 0x87; /* mode page policy */
1940 arr[n++] = 0x88; /* SCSI ports */
1941 if (is_disk_zbc) { /* SBC or ZBC */
1942 arr[n++] = 0x89; /* ATA information */
1943 arr[n++] = 0xb0; /* Block limits */
1944 arr[n++] = 0xb1; /* Block characteristics */
1946 arr[n++] = 0xb2; /* LB Provisioning */
1948 arr[n++] = 0xb6; /* ZB dev. char. */
1949 arr[n++] = 0xb7; /* Block limits extension */
1951 arr[3] = n - 4; /* number of supported VPD pages */
1952 } else if (0x80 == cmd[2]) { /* unit serial number */
1954 memcpy(&arr[4], lu_id_str, len);
1955 } else if (0x83 == cmd[2]) { /* device identification */
1956 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1957 target_dev_id, lu_id_num,
1960 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1961 arr[3] = inquiry_vpd_84(&arr[4]);
1962 } else if (0x85 == cmd[2]) { /* Management network addresses */
1963 arr[3] = inquiry_vpd_85(&arr[4]);
1964 } else if (0x86 == cmd[2]) { /* extended inquiry */
1965 arr[3] = 0x3c; /* number of following entries */
1966 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1967 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1968 else if (have_dif_prot)
1969 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1971 arr[4] = 0x0; /* no protection stuff */
1972 arr[5] = 0x7; /* head of q, ordered + simple q's */
1973 } else if (0x87 == cmd[2]) { /* mode page policy */
1974 arr[3] = 0x8; /* number of following entries */
1975 arr[4] = 0x2; /* disconnect-reconnect mp */
1976 arr[6] = 0x80; /* mlus, shared */
1977 arr[8] = 0x18; /* protocol specific lu */
1978 arr[10] = 0x82; /* mlus, per initiator port */
1979 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1980 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1981 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1982 n = inquiry_vpd_89(&arr[4]);
1983 put_unaligned_be16(n, arr + 2);
1984 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1985 arr[3] = inquiry_vpd_b0(&arr[4]);
1986 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1987 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1988 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1989 arr[3] = inquiry_vpd_b2(&arr[4]);
1990 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1991 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1992 } else if (cmd[2] == 0xb7) { /* block limits extension page */
1993 arr[3] = inquiry_vpd_b7(&arr[4]);
1995 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1997 return check_condition_result;
1999 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2000 ret = fill_from_dev_buffer(scp, arr,
2001 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2005 /* drops through here for a standard inquiry */
2006 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
2007 arr[2] = sdebug_scsi_level;
2008 arr[3] = 2; /* response_data_format==2 */
2009 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2010 arr[5] = (int)have_dif_prot; /* PROTECT bit */
2011 if (sdebug_vpd_use_hostno == 0)
2012 arr[5] |= 0x10; /* claim: implicit TPGS */
2013 arr[6] = 0x10; /* claim: MultiP */
2014 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2015 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2016 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2017 memcpy(&arr[16], sdebug_inq_product_id, 16);
2018 memcpy(&arr[32], sdebug_inq_product_rev, 4);
2019 /* Use Vendor Specific area to place driver date in ASCII hex */
2020 memcpy(&arr[36], sdebug_version_date, 8);
2021 /* version descriptors (2 bytes each) follow */
2022 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
2023 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
2025 if (is_disk) { /* SBC-4 no version claimed */
2026 put_unaligned_be16(0x600, arr + n);
2028 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
2029 put_unaligned_be16(0x525, arr + n);
2031 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
2032 put_unaligned_be16(0x624, arr + n);
2035 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
2036 ret = fill_from_dev_buffer(scp, arr,
2037 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2042 /* See resp_iec_m_pg() for how this data is manipulated */
2043 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2046 static int resp_requests(struct scsi_cmnd *scp,
2047 struct sdebug_dev_info *devip)
2049 unsigned char *cmd = scp->cmnd;
2050 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
2051 bool dsense = !!(cmd[1] & 1);
2052 u32 alloc_len = cmd[4];
2054 int stopped_state = atomic_read(&devip->stopped);
2056 memset(arr, 0, sizeof(arr));
2057 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
2061 arr[2] = LOGICAL_UNIT_NOT_READY;
2062 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2066 arr[2] = NOT_READY; /* NO_SENSE in sense_key */
2067 arr[7] = 0xa; /* 18 byte sense buffer */
2068 arr[12] = LOGICAL_UNIT_NOT_READY;
2069 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2071 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2072 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2075 arr[1] = 0x0; /* NO_SENSE in sense_key */
2076 arr[2] = THRESHOLD_EXCEEDED;
2077 arr[3] = 0xff; /* Failure prediction(false) */
2081 arr[2] = 0x0; /* NO_SENSE in sense_key */
2082 arr[7] = 0xa; /* 18 byte sense buffer */
2083 arr[12] = THRESHOLD_EXCEEDED;
2084 arr[13] = 0xff; /* Failure prediction(false) */
2086 } else { /* nothing to report */
2089 memset(arr, 0, len);
2092 memset(arr, 0, len);
2097 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2100 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2102 unsigned char *cmd = scp->cmnd;
2103 int power_cond, want_stop, stopped_state;
2106 power_cond = (cmd[4] & 0xf0) >> 4;
2108 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2109 return check_condition_result;
2111 want_stop = !(cmd[4] & 1);
2112 stopped_state = atomic_read(&devip->stopped);
2113 if (stopped_state == 2) {
2114 ktime_t now_ts = ktime_get_boottime();
2116 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2117 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2119 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2120 /* tur_ms_to_ready timer extinguished */
2121 atomic_set(&devip->stopped, 0);
2125 if (stopped_state == 2) {
2127 stopped_state = 1; /* dummy up success */
2128 } else { /* Disallow tur_ms_to_ready delay to be overridden */
2129 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2130 return check_condition_result;
2134 changing = (stopped_state != want_stop);
2136 atomic_xchg(&devip->stopped, want_stop);
2137 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
2138 return SDEG_RES_IMMED_MASK;
2143 static sector_t get_sdebug_capacity(void)
2145 static const unsigned int gibibyte = 1073741824;
2147 if (sdebug_virtual_gb > 0)
2148 return (sector_t)sdebug_virtual_gb *
2149 (gibibyte / sdebug_sector_size);
2151 return sdebug_store_sectors;
2154 #define SDEBUG_READCAP_ARR_SZ 8
2155 static int resp_readcap(struct scsi_cmnd *scp,
2156 struct sdebug_dev_info *devip)
2158 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2161 /* following just in case virtual_gb changed */
2162 sdebug_capacity = get_sdebug_capacity();
2163 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2164 if (sdebug_capacity < 0xffffffff) {
2165 capac = (unsigned int)sdebug_capacity - 1;
2166 put_unaligned_be32(capac, arr + 0);
2168 put_unaligned_be32(0xffffffff, arr + 0);
2169 put_unaligned_be16(sdebug_sector_size, arr + 6);
2170 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2173 #define SDEBUG_READCAP16_ARR_SZ 32
2174 static int resp_readcap16(struct scsi_cmnd *scp,
2175 struct sdebug_dev_info *devip)
2177 unsigned char *cmd = scp->cmnd;
2178 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2181 alloc_len = get_unaligned_be32(cmd + 10);
2182 /* following just in case virtual_gb changed */
2183 sdebug_capacity = get_sdebug_capacity();
2184 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2185 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2186 put_unaligned_be32(sdebug_sector_size, arr + 8);
2187 arr[13] = sdebug_physblk_exp & 0xf;
2188 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2190 if (scsi_debug_lbp()) {
2191 arr[14] |= 0x80; /* LBPME */
2192 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2193 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2194 * in the wider field maps to 0 in this field.
2196 if (sdebug_lbprz & 1) /* precisely what the draft requires */
2201 * Since the scsi_debug READ CAPACITY implementation always reports the
2202 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2207 arr[15] = sdebug_lowest_aligned & 0xff;
2209 if (have_dif_prot) {
2210 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2211 arr[12] |= 1; /* PROT_EN */
2214 return fill_from_dev_buffer(scp, arr,
2215 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2218 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2220 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2221 struct sdebug_dev_info *devip)
2223 unsigned char *cmd = scp->cmnd;
2225 int host_no = devip->sdbg_host->shost->host_no;
2226 int port_group_a, port_group_b, port_a, port_b;
2230 alen = get_unaligned_be32(cmd + 6);
2231 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2233 return DID_REQUEUE << 16;
2235 * EVPD page 0x88 states we have two ports, one
2236 * real and a fake port with no device connected.
2237 * So we create two port groups with one port each
2238 * and set the group with port B to unavailable.
2240 port_a = 0x1; /* relative port A */
2241 port_b = 0x2; /* relative port B */
2242 port_group_a = (((host_no + 1) & 0x7f) << 8) +
2243 (devip->channel & 0x7f);
2244 port_group_b = (((host_no + 1) & 0x7f) << 8) +
2245 (devip->channel & 0x7f) + 0x80;
2248 * The asymmetric access state is cycled according to the host_id.
2251 if (sdebug_vpd_use_hostno == 0) {
2252 arr[n++] = host_no % 3; /* Asymm access state */
2253 arr[n++] = 0x0F; /* claim: all states are supported */
2255 arr[n++] = 0x0; /* Active/Optimized path */
2256 arr[n++] = 0x01; /* only support active/optimized paths */
2258 put_unaligned_be16(port_group_a, arr + n);
2260 arr[n++] = 0; /* Reserved */
2261 arr[n++] = 0; /* Status code */
2262 arr[n++] = 0; /* Vendor unique */
2263 arr[n++] = 0x1; /* One port per group */
2264 arr[n++] = 0; /* Reserved */
2265 arr[n++] = 0; /* Reserved */
2266 put_unaligned_be16(port_a, arr + n);
2268 arr[n++] = 3; /* Port unavailable */
2269 arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
2270 put_unaligned_be16(port_group_b, arr + n);
2272 arr[n++] = 0; /* Reserved */
2273 arr[n++] = 0; /* Status code */
2274 arr[n++] = 0; /* Vendor unique */
2275 arr[n++] = 0x1; /* One port per group */
2276 arr[n++] = 0; /* Reserved */
2277 arr[n++] = 0; /* Reserved */
2278 put_unaligned_be16(port_b, arr + n);
2282 put_unaligned_be32(rlen, arr + 0);
2285 * Return the smallest value of either
2286 * - The allocated length
2287 * - The constructed command length
2288 * - The maximum array size
2290 rlen = min(alen, n);
2291 ret = fill_from_dev_buffer(scp, arr,
2292 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2297 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2298 struct sdebug_dev_info *devip)
2301 u8 reporting_opts, req_opcode, sdeb_i, supp;
2303 u32 alloc_len, a_len;
2304 int k, offset, len, errsts, count, bump, na;
2305 const struct opcode_info_t *oip;
2306 const struct opcode_info_t *r_oip;
2308 u8 *cmd = scp->cmnd;
2310 rctd = !!(cmd[2] & 0x80);
2311 reporting_opts = cmd[2] & 0x7;
2312 req_opcode = cmd[3];
2313 req_sa = get_unaligned_be16(cmd + 4);
2314 alloc_len = get_unaligned_be32(cmd + 6);
2315 if (alloc_len < 4 || alloc_len > 0xffff) {
2316 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2317 return check_condition_result;
2319 if (alloc_len > 8192)
2323 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2325 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2327 return check_condition_result;
2329 switch (reporting_opts) {
2330 case 0: /* all commands */
2331 /* count number of commands */
2332 for (count = 0, oip = opcode_info_arr;
2333 oip->num_attached != 0xff; ++oip) {
2334 if (F_INV_OP & oip->flags)
2336 count += (oip->num_attached + 1);
2338 bump = rctd ? 20 : 8;
2339 put_unaligned_be32(count * bump, arr);
2340 for (offset = 4, oip = opcode_info_arr;
2341 oip->num_attached != 0xff && offset < a_len; ++oip) {
2342 if (F_INV_OP & oip->flags)
2344 na = oip->num_attached;
2345 arr[offset] = oip->opcode;
2346 put_unaligned_be16(oip->sa, arr + offset + 2);
2348 arr[offset + 5] |= 0x2;
2349 if (FF_SA & oip->flags)
2350 arr[offset + 5] |= 0x1;
2351 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2353 put_unaligned_be16(0xa, arr + offset + 8);
2355 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2356 if (F_INV_OP & oip->flags)
2359 arr[offset] = oip->opcode;
2360 put_unaligned_be16(oip->sa, arr + offset + 2);
2362 arr[offset + 5] |= 0x2;
2363 if (FF_SA & oip->flags)
2364 arr[offset + 5] |= 0x1;
2365 put_unaligned_be16(oip->len_mask[0],
2368 put_unaligned_be16(0xa,
2375 case 1: /* one command: opcode only */
2376 case 2: /* one command: opcode plus service action */
2377 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2378 sdeb_i = opcode_ind_arr[req_opcode];
2379 oip = &opcode_info_arr[sdeb_i];
2380 if (F_INV_OP & oip->flags) {
2384 if (1 == reporting_opts) {
2385 if (FF_SA & oip->flags) {
2386 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2389 return check_condition_result;
2392 } else if (2 == reporting_opts &&
2393 0 == (FF_SA & oip->flags)) {
2394 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2395 kfree(arr); /* point at requested sa */
2396 return check_condition_result;
2398 if (0 == (FF_SA & oip->flags) &&
2399 req_opcode == oip->opcode)
2401 else if (0 == (FF_SA & oip->flags)) {
2402 na = oip->num_attached;
2403 for (k = 0, oip = oip->arrp; k < na;
2405 if (req_opcode == oip->opcode)
2408 supp = (k >= na) ? 1 : 3;
2409 } else if (req_sa != oip->sa) {
2410 na = oip->num_attached;
2411 for (k = 0, oip = oip->arrp; k < na;
2413 if (req_sa == oip->sa)
2416 supp = (k >= na) ? 1 : 3;
2420 u = oip->len_mask[0];
2421 put_unaligned_be16(u, arr + 2);
2422 arr[4] = oip->opcode;
2423 for (k = 1; k < u; ++k)
2424 arr[4 + k] = (k < 16) ?
2425 oip->len_mask[k] : 0xff;
2430 arr[1] = (rctd ? 0x80 : 0) | supp;
2432 put_unaligned_be16(0xa, arr + offset);
2437 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2439 return check_condition_result;
2441 offset = (offset < a_len) ? offset : a_len;
2442 len = (offset < alloc_len) ? offset : alloc_len;
2443 errsts = fill_from_dev_buffer(scp, arr, len);
2448 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2449 struct sdebug_dev_info *devip)
2454 u8 *cmd = scp->cmnd;
2456 memset(arr, 0, sizeof(arr));
2457 repd = !!(cmd[2] & 0x80);
2458 alloc_len = get_unaligned_be32(cmd + 6);
2459 if (alloc_len < 4) {
2460 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2461 return check_condition_result;
2463 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2464 arr[1] = 0x1; /* ITNRS */
2471 len = (len < alloc_len) ? len : alloc_len;
2472 return fill_from_dev_buffer(scp, arr, len);
2475 /* <<Following mode page info copied from ST318451LW>> */
2477 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2478 { /* Read-Write Error Recovery page for mode_sense */
2479 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2482 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2484 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2485 return sizeof(err_recov_pg);
2488 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2489 { /* Disconnect-Reconnect page for mode_sense */
2490 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2491 0, 0, 0, 0, 0, 0, 0, 0};
2493 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2495 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2496 return sizeof(disconnect_pg);
2499 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2500 { /* Format device page for mode_sense */
2501 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2502 0, 0, 0, 0, 0, 0, 0, 0,
2503 0, 0, 0, 0, 0x40, 0, 0, 0};
2505 memcpy(p, format_pg, sizeof(format_pg));
2506 put_unaligned_be16(sdebug_sectors_per, p + 10);
2507 put_unaligned_be16(sdebug_sector_size, p + 12);
2508 if (sdebug_removable)
2509 p[20] |= 0x20; /* should agree with INQUIRY */
2511 memset(p + 2, 0, sizeof(format_pg) - 2);
2512 return sizeof(format_pg);
2515 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2516 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2519 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2520 { /* Caching page for mode_sense */
2521 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2522 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2523 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2524 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2526 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2527 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2528 memcpy(p, caching_pg, sizeof(caching_pg));
2530 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2531 else if (2 == pcontrol)
2532 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2533 return sizeof(caching_pg);
2536 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2539 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2540 { /* Control mode page for mode_sense */
2541 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2543 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2547 ctrl_m_pg[2] |= 0x4;
2549 ctrl_m_pg[2] &= ~0x4;
2552 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2554 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2556 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2557 else if (2 == pcontrol)
2558 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2559 return sizeof(ctrl_m_pg);
2563 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2564 { /* Informational Exceptions control mode page for mode_sense */
2565 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2567 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2570 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2572 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2573 else if (2 == pcontrol)
2574 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2575 return sizeof(iec_m_pg);
2578 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2579 { /* SAS SSP mode page - short format for mode_sense */
2580 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2581 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2583 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2585 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2586 return sizeof(sas_sf_m_pg);
2590 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2592 { /* SAS phy control and discover mode page for mode_sense */
2593 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2594 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2595 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2596 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2597 0x2, 0, 0, 0, 0, 0, 0, 0,
2598 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2599 0, 0, 0, 0, 0, 0, 0, 0,
2600 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2601 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2602 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2603 0x3, 0, 0, 0, 0, 0, 0, 0,
2604 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2605 0, 0, 0, 0, 0, 0, 0, 0,
2609 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2610 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2611 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2612 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2613 port_a = target_dev_id + 1;
2614 port_b = port_a + 1;
2615 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2616 put_unaligned_be32(port_a, p + 20);
2617 put_unaligned_be32(port_b, p + 48 + 20);
2619 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2620 return sizeof(sas_pcd_m_pg);
2623 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2624 { /* SAS SSP shared protocol specific port mode subpage */
2625 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2626 0, 0, 0, 0, 0, 0, 0, 0,
2629 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2631 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2632 return sizeof(sas_sha_m_pg);
2635 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2636 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2638 static int resp_mode_sense(struct scsi_cmnd *scp,
2639 struct sdebug_dev_info *devip)
2641 int pcontrol, pcode, subpcode, bd_len;
2642 unsigned char dev_spec;
2643 u32 alloc_len, offset, len;
2645 int target = scp->device->id;
2647 unsigned char *arr __free(kfree);
2648 unsigned char *cmd = scp->cmnd;
2649 bool dbd, llbaa, msense_6, is_disk, is_zbc;
2651 arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2654 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2655 pcontrol = (cmd[2] & 0xc0) >> 6;
2656 pcode = cmd[2] & 0x3f;
2658 msense_6 = (MODE_SENSE == cmd[0]);
2659 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2660 is_disk = (sdebug_ptype == TYPE_DISK);
2661 is_zbc = devip->zoned;
2662 if ((is_disk || is_zbc) && !dbd)
2663 bd_len = llbaa ? 16 : 8;
2666 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2667 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2668 if (0x3 == pcontrol) { /* Saving values not supported */
2669 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2670 return check_condition_result;
2672 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2673 (devip->target * 1000) - 3;
2674 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2675 if (is_disk || is_zbc) {
2676 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2688 arr[4] = 0x1; /* set LONGLBA bit */
2689 arr[7] = bd_len; /* assume 255 or less */
2693 if ((bd_len > 0) && (!sdebug_capacity))
2694 sdebug_capacity = get_sdebug_capacity();
2697 if (sdebug_capacity > 0xfffffffe)
2698 put_unaligned_be32(0xffffffff, ap + 0);
2700 put_unaligned_be32(sdebug_capacity, ap + 0);
2701 put_unaligned_be16(sdebug_sector_size, ap + 6);
2704 } else if (16 == bd_len) {
2705 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2706 put_unaligned_be32(sdebug_sector_size, ap + 12);
2712 case 0x1: /* Read-Write error recovery page, direct access */
2713 if (subpcode > 0x0 && subpcode < 0xff)
2715 len = resp_err_recov_pg(ap, pcontrol, target);
2718 case 0x2: /* Disconnect-Reconnect page, all devices */
2719 if (subpcode > 0x0 && subpcode < 0xff)
2721 len = resp_disconnect_pg(ap, pcontrol, target);
2724 case 0x3: /* Format device page, direct access */
2725 if (subpcode > 0x0 && subpcode < 0xff)
2728 len = resp_format_pg(ap, pcontrol, target);
2734 case 0x8: /* Caching page, direct access */
2735 if (subpcode > 0x0 && subpcode < 0xff)
2737 if (is_disk || is_zbc) {
2738 len = resp_caching_pg(ap, pcontrol, target);
2744 case 0xa: /* Control Mode page, all devices */
2745 if (subpcode > 0x0 && subpcode < 0xff)
2747 len = resp_ctrl_m_pg(ap, pcontrol, target);
2750 case 0x19: /* if spc==1 then sas phy, control+discover */
2751 if (subpcode > 0x2 && subpcode < 0xff)
2754 if ((0x0 == subpcode) || (0xff == subpcode))
2755 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2756 if ((0x1 == subpcode) || (0xff == subpcode))
2757 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2759 if ((0x2 == subpcode) || (0xff == subpcode))
2760 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2763 case 0x1c: /* Informational Exceptions Mode page, all devices */
2764 if (subpcode > 0x0 && subpcode < 0xff)
2766 len = resp_iec_m_pg(ap, pcontrol, target);
2769 case 0x3f: /* Read all Mode pages */
2770 if (subpcode > 0x0 && subpcode < 0xff)
2772 len = resp_err_recov_pg(ap, pcontrol, target);
2773 len += resp_disconnect_pg(ap + len, pcontrol, target);
2775 len += resp_format_pg(ap + len, pcontrol, target);
2776 len += resp_caching_pg(ap + len, pcontrol, target);
2777 } else if (is_zbc) {
2778 len += resp_caching_pg(ap + len, pcontrol, target);
2780 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2781 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2782 if (0xff == subpcode) {
2783 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2785 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2787 len += resp_iec_m_pg(ap + len, pcontrol, target);
2794 arr[0] = offset - 1;
2796 put_unaligned_be16((offset - 2), arr + 0);
2797 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2800 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2801 return check_condition_result;
2804 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2805 return check_condition_result;
2808 #define SDEBUG_MAX_MSELECT_SZ 512
2810 static int resp_mode_select(struct scsi_cmnd *scp,
2811 struct sdebug_dev_info *devip)
2813 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2814 int param_len, res, mpage;
2815 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2816 unsigned char *cmd = scp->cmnd;
2817 int mselect6 = (MODE_SELECT == cmd[0]);
2819 memset(arr, 0, sizeof(arr));
2822 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2823 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2824 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2825 return check_condition_result;
2827 res = fetch_to_dev_buffer(scp, arr, param_len);
2829 return DID_ERROR << 16;
2830 else if (sdebug_verbose && (res < param_len))
2831 sdev_printk(KERN_INFO, scp->device,
2832 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2833 __func__, param_len, res);
2834 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2835 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2836 off = bd_len + (mselect6 ? 4 : 8);
2837 if (md_len > 2 || off >= res) {
2838 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2839 return check_condition_result;
2841 mpage = arr[off] & 0x3f;
2842 ps = !!(arr[off] & 0x80);
2844 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2845 return check_condition_result;
2847 spf = !!(arr[off] & 0x40);
2848 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2850 if ((pg_len + off) > param_len) {
2851 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2852 PARAMETER_LIST_LENGTH_ERR, 0);
2853 return check_condition_result;
2856 case 0x8: /* Caching Mode page */
2857 if (caching_pg[1] == arr[off + 1]) {
2858 memcpy(caching_pg + 2, arr + off + 2,
2859 sizeof(caching_pg) - 2);
2860 goto set_mode_changed_ua;
2863 case 0xa: /* Control Mode page */
2864 if (ctrl_m_pg[1] == arr[off + 1]) {
2865 memcpy(ctrl_m_pg + 2, arr + off + 2,
2866 sizeof(ctrl_m_pg) - 2);
2867 if (ctrl_m_pg[4] & 0x8)
2871 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2872 goto set_mode_changed_ua;
2875 case 0x1c: /* Informational Exceptions Mode page */
2876 if (iec_m_pg[1] == arr[off + 1]) {
2877 memcpy(iec_m_pg + 2, arr + off + 2,
2878 sizeof(iec_m_pg) - 2);
2879 goto set_mode_changed_ua;
2885 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2886 return check_condition_result;
2887 set_mode_changed_ua:
2888 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2892 static int resp_temp_l_pg(unsigned char *arr)
2894 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2895 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2898 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2899 return sizeof(temp_l_pg);
2902 static int resp_ie_l_pg(unsigned char *arr)
2904 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2907 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2908 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2909 arr[4] = THRESHOLD_EXCEEDED;
2912 return sizeof(ie_l_pg);
2915 static int resp_env_rep_l_spg(unsigned char *arr)
2917 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2918 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2919 0x1, 0x0, 0x23, 0x8,
2920 0x0, 55, 72, 35, 55, 45, 0, 0,
2923 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2924 return sizeof(env_rep_l_spg);
2927 #define SDEBUG_MAX_LSENSE_SZ 512
2929 static int resp_log_sense(struct scsi_cmnd *scp,
2930 struct sdebug_dev_info *devip)
2932 int ppc, sp, pcode, subpcode;
2933 u32 alloc_len, len, n;
2934 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2935 unsigned char *cmd = scp->cmnd;
2937 memset(arr, 0, sizeof(arr));
2941 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2942 return check_condition_result;
2944 pcode = cmd[2] & 0x3f;
2945 subpcode = cmd[3] & 0xff;
2946 alloc_len = get_unaligned_be16(cmd + 7);
2948 if (0 == subpcode) {
2950 case 0x0: /* Supported log pages log page */
2952 arr[n++] = 0x0; /* this page */
2953 arr[n++] = 0xd; /* Temperature */
2954 arr[n++] = 0x2f; /* Informational exceptions */
2957 case 0xd: /* Temperature log page */
2958 arr[3] = resp_temp_l_pg(arr + 4);
2960 case 0x2f: /* Informational exceptions log page */
2961 arr[3] = resp_ie_l_pg(arr + 4);
2964 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2965 return check_condition_result;
2967 } else if (0xff == subpcode) {
2971 case 0x0: /* Supported log pages and subpages log page */
2974 arr[n++] = 0x0; /* 0,0 page */
2976 arr[n++] = 0xff; /* this page */
2978 arr[n++] = 0x0; /* Temperature */
2980 arr[n++] = 0x1; /* Environment reporting */
2982 arr[n++] = 0xff; /* all 0xd subpages */
2984 arr[n++] = 0x0; /* Informational exceptions */
2986 arr[n++] = 0xff; /* all 0x2f subpages */
2989 case 0xd: /* Temperature subpages */
2992 arr[n++] = 0x0; /* Temperature */
2994 arr[n++] = 0x1; /* Environment reporting */
2996 arr[n++] = 0xff; /* these subpages */
2999 case 0x2f: /* Informational exceptions subpages */
3002 arr[n++] = 0x0; /* Informational exceptions */
3004 arr[n++] = 0xff; /* these subpages */
3008 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3009 return check_condition_result;
3011 } else if (subpcode > 0) {
3014 if (pcode == 0xd && subpcode == 1)
3015 arr[3] = resp_env_rep_l_spg(arr + 4);
3017 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3018 return check_condition_result;
3021 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3022 return check_condition_result;
3024 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3025 return fill_from_dev_buffer(scp, arr,
3026 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3029 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3031 return devip->nr_zones != 0;
3034 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3035 unsigned long long lba)
3037 u32 zno = lba >> devip->zsize_shift;
3038 struct sdeb_zone_state *zsp;
3040 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3041 return &devip->zstate[zno];
3044 * If the zone capacity is less than the zone size, adjust for gap
3047 zno = 2 * zno - devip->nr_conv_zones;
3048 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
3049 zsp = &devip->zstate[zno];
3050 if (lba >= zsp->z_start + zsp->z_size)
3052 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3056 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3058 return zsp->z_type == ZBC_ZTYPE_CNV;
3061 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3063 return zsp->z_type == ZBC_ZTYPE_GAP;
3066 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3068 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3071 static void zbc_close_zone(struct sdebug_dev_info *devip,
3072 struct sdeb_zone_state *zsp)
3074 enum sdebug_z_cond zc;
3076 if (!zbc_zone_is_seq(zsp))
3080 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3083 if (zc == ZC2_IMPLICIT_OPEN)
3084 devip->nr_imp_open--;
3086 devip->nr_exp_open--;
3088 if (zsp->z_wp == zsp->z_start) {
3089 zsp->z_cond = ZC1_EMPTY;
3091 zsp->z_cond = ZC4_CLOSED;
3096 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3098 struct sdeb_zone_state *zsp = &devip->zstate[0];
3101 for (i = 0; i < devip->nr_zones; i++, zsp++) {
3102 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3103 zbc_close_zone(devip, zsp);
3109 static void zbc_open_zone(struct sdebug_dev_info *devip,
3110 struct sdeb_zone_state *zsp, bool explicit)
3112 enum sdebug_z_cond zc;
3114 if (!zbc_zone_is_seq(zsp))
3118 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3119 (!explicit && zc == ZC2_IMPLICIT_OPEN))
3122 /* Close an implicit open zone if necessary */
3123 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3124 zbc_close_zone(devip, zsp);
3125 else if (devip->max_open &&
3126 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3127 zbc_close_imp_open_zone(devip);
3129 if (zsp->z_cond == ZC4_CLOSED)
3132 zsp->z_cond = ZC3_EXPLICIT_OPEN;
3133 devip->nr_exp_open++;
3135 zsp->z_cond = ZC2_IMPLICIT_OPEN;
3136 devip->nr_imp_open++;
3140 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3141 struct sdeb_zone_state *zsp)
3143 switch (zsp->z_cond) {
3144 case ZC2_IMPLICIT_OPEN:
3145 devip->nr_imp_open--;
3147 case ZC3_EXPLICIT_OPEN:
3148 devip->nr_exp_open--;
3151 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3152 zsp->z_start, zsp->z_cond);
3155 zsp->z_cond = ZC5_FULL;
3158 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3159 unsigned long long lba, unsigned int num)
3161 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3162 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3164 if (!zbc_zone_is_seq(zsp))
3167 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3169 if (zsp->z_wp >= zend)
3170 zbc_set_zone_full(devip, zsp);
3175 if (lba != zsp->z_wp)
3176 zsp->z_non_seq_resource = true;
3182 } else if (end > zsp->z_wp) {
3188 if (zsp->z_wp >= zend)
3189 zbc_set_zone_full(devip, zsp);
3195 zend = zsp->z_start + zsp->z_size;
3200 static int check_zbc_access_params(struct scsi_cmnd *scp,
3201 unsigned long long lba, unsigned int num, bool write)
3203 struct scsi_device *sdp = scp->device;
3204 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3205 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3206 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3209 /* For host-managed, reads cannot cross zone types boundaries */
3210 if (zsp->z_type != zsp_end->z_type) {
3211 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3214 return check_condition_result;
3219 /* Writing into a gap zone is not allowed */
3220 if (zbc_zone_is_gap(zsp)) {
3221 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3222 ATTEMPT_ACCESS_GAP);
3223 return check_condition_result;
3226 /* No restrictions for writes within conventional zones */
3227 if (zbc_zone_is_conv(zsp)) {
3228 if (!zbc_zone_is_conv(zsp_end)) {
3229 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3231 WRITE_BOUNDARY_ASCQ);
3232 return check_condition_result;
3237 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3238 /* Writes cannot cross sequential zone boundaries */
3239 if (zsp_end != zsp) {
3240 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3242 WRITE_BOUNDARY_ASCQ);
3243 return check_condition_result;
3245 /* Cannot write full zones */
3246 if (zsp->z_cond == ZC5_FULL) {
3247 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3248 INVALID_FIELD_IN_CDB, 0);
3249 return check_condition_result;
3251 /* Writes must be aligned to the zone WP */
3252 if (lba != zsp->z_wp) {
3253 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3255 UNALIGNED_WRITE_ASCQ);
3256 return check_condition_result;
3260 /* Handle implicit open of closed and empty zones */
3261 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3262 if (devip->max_open &&
3263 devip->nr_exp_open >= devip->max_open) {
3264 mk_sense_buffer(scp, DATA_PROTECT,
3267 return check_condition_result;
3269 zbc_open_zone(devip, zsp, false);
3275 static inline int check_device_access_params
3276 (struct scsi_cmnd *scp, unsigned long long lba,
3277 unsigned int num, bool write)
3279 struct scsi_device *sdp = scp->device;
3280 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3282 if (lba + num > sdebug_capacity) {
3283 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3284 return check_condition_result;
3286 /* transfer length excessive (tie in to block limits VPD page) */
3287 if (num > sdebug_store_sectors) {
3288 /* needs work to find which cdb byte 'num' comes from */
3289 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3290 return check_condition_result;
3292 if (write && unlikely(sdebug_wp)) {
3293 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3294 return check_condition_result;
3296 if (sdebug_dev_is_zoned(devip))
3297 return check_zbc_access_params(scp, lba, num, write);
3303 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3304 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3305 * that access any of the "stores" in struct sdeb_store_info should call this
3306 * function with bug_if_fake_rw set to true.
3308 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3309 bool bug_if_fake_rw)
3311 if (sdebug_fake_rw) {
3312 BUG_ON(bug_if_fake_rw); /* See note above */
3315 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3318 /* Returns number of bytes copied or -1 if error. */
3319 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3320 u32 sg_skip, u64 lba, u32 num, bool do_write)
3323 u64 block, rest = 0;
3324 enum dma_data_direction dir;
3325 struct scsi_data_buffer *sdb = &scp->sdb;
3329 dir = DMA_TO_DEVICE;
3330 write_since_sync = true;
3332 dir = DMA_FROM_DEVICE;
3335 if (!sdb->length || !sip)
3337 if (scp->sc_data_direction != dir)
3341 block = do_div(lba, sdebug_store_sectors);
3342 if (block + num > sdebug_store_sectors)
3343 rest = block + num - sdebug_store_sectors;
3345 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3346 fsp + (block * sdebug_sector_size),
3347 (num - rest) * sdebug_sector_size, sg_skip, do_write);
3348 if (ret != (num - rest) * sdebug_sector_size)
3352 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3353 fsp, rest * sdebug_sector_size,
3354 sg_skip + ((num - rest) * sdebug_sector_size),
3361 /* Returns number of bytes copied or -1 if error. */
3362 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3364 struct scsi_data_buffer *sdb = &scp->sdb;
3368 if (scp->sc_data_direction != DMA_TO_DEVICE)
3370 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3371 num * sdebug_sector_size, 0, true);
3374 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3375 * arr into sip->storep+lba and return true. If comparison fails then
3377 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3378 const u8 *arr, bool compare_only)
3381 u64 block, rest = 0;
3382 u32 store_blks = sdebug_store_sectors;
3383 u32 lb_size = sdebug_sector_size;
3384 u8 *fsp = sip->storep;
3386 block = do_div(lba, store_blks);
3387 if (block + num > store_blks)
3388 rest = block + num - store_blks;
3390 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3394 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3400 arr += num * lb_size;
3401 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3403 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3407 static __be16 dif_compute_csum(const void *buf, int len)
3412 csum = (__force __be16)ip_compute_csum(buf, len);
3414 csum = cpu_to_be16(crc_t10dif(buf, len));
3419 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3420 sector_t sector, u32 ei_lba)
3422 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3424 if (sdt->guard_tag != csum) {
3425 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3426 (unsigned long)sector,
3427 be16_to_cpu(sdt->guard_tag),
3431 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3432 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3433 pr_err("REF check failed on sector %lu\n",
3434 (unsigned long)sector);
3437 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3438 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3439 pr_err("REF check failed on sector %lu\n",
3440 (unsigned long)sector);
3446 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3447 unsigned int sectors, bool read)
3451 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3452 scp->device->hostdata, true);
3453 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3454 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3455 struct sg_mapping_iter miter;
3457 /* Bytes of protection data to copy into sgl */
3458 resid = sectors * sizeof(*dif_storep);
3460 sg_miter_start(&miter, scsi_prot_sglist(scp),
3461 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3462 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3464 while (sg_miter_next(&miter) && resid > 0) {
3465 size_t len = min_t(size_t, miter.length, resid);
3466 void *start = dif_store(sip, sector);
3469 if (dif_store_end < start + len)
3470 rest = start + len - dif_store_end;
3475 memcpy(paddr, start, len - rest);
3477 memcpy(start, paddr, len - rest);
3481 memcpy(paddr + len - rest, dif_storep, rest);
3483 memcpy(dif_storep, paddr + len - rest, rest);
3486 sector += len / sizeof(*dif_storep);
3489 sg_miter_stop(&miter);
3492 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3493 unsigned int sectors, u32 ei_lba)
3498 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3499 scp->device->hostdata, true);
3500 struct t10_pi_tuple *sdt;
3502 for (i = 0; i < sectors; i++, ei_lba++) {
3503 sector = start_sec + i;
3504 sdt = dif_store(sip, sector);
3506 if (sdt->app_tag == cpu_to_be16(0xffff))
3510 * Because scsi_debug acts as both initiator and
3511 * target we proceed to verify the PI even if
3512 * RDPROTECT=3. This is done so the "initiator" knows
3513 * which type of error to return. Otherwise we would
3514 * have to iterate over the PI twice.
3516 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3517 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3526 dif_copy_prot(scp, start_sec, sectors, true);
3533 sdeb_read_lock(struct sdeb_store_info *sip)
3535 if (sdebug_no_rwlock) {
3537 __acquire(&sip->macc_lck);
3539 __acquire(&sdeb_fake_rw_lck);
3542 read_lock(&sip->macc_lck);
3544 read_lock(&sdeb_fake_rw_lck);
3549 sdeb_read_unlock(struct sdeb_store_info *sip)
3551 if (sdebug_no_rwlock) {
3553 __release(&sip->macc_lck);
3555 __release(&sdeb_fake_rw_lck);
3558 read_unlock(&sip->macc_lck);
3560 read_unlock(&sdeb_fake_rw_lck);
3565 sdeb_write_lock(struct sdeb_store_info *sip)
3567 if (sdebug_no_rwlock) {
3569 __acquire(&sip->macc_lck);
3571 __acquire(&sdeb_fake_rw_lck);
3574 write_lock(&sip->macc_lck);
3576 write_lock(&sdeb_fake_rw_lck);
3581 sdeb_write_unlock(struct sdeb_store_info *sip)
3583 if (sdebug_no_rwlock) {
3585 __release(&sip->macc_lck);
3587 __release(&sdeb_fake_rw_lck);
3590 write_unlock(&sip->macc_lck);
3592 write_unlock(&sdeb_fake_rw_lck);
3596 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3603 struct sdeb_store_info *sip = devip2sip(devip, true);
3604 u8 *cmd = scp->cmnd;
3609 lba = get_unaligned_be64(cmd + 2);
3610 num = get_unaligned_be32(cmd + 10);
3615 lba = get_unaligned_be32(cmd + 2);
3616 num = get_unaligned_be16(cmd + 7);
3621 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3622 (u32)(cmd[1] & 0x1f) << 16;
3623 num = (0 == cmd[4]) ? 256 : cmd[4];
3628 lba = get_unaligned_be32(cmd + 2);
3629 num = get_unaligned_be32(cmd + 6);
3632 case XDWRITEREAD_10:
3634 lba = get_unaligned_be32(cmd + 2);
3635 num = get_unaligned_be16(cmd + 7);
3638 default: /* assume READ(32) */
3639 lba = get_unaligned_be64(cmd + 12);
3640 ei_lba = get_unaligned_be32(cmd + 20);
3641 num = get_unaligned_be32(cmd + 28);
3645 if (unlikely(have_dif_prot && check_prot)) {
3646 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3648 mk_sense_invalid_opcode(scp);
3649 return check_condition_result;
3651 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3652 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3653 (cmd[1] & 0xe0) == 0)
3654 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3657 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3658 atomic_read(&sdeb_inject_pending))) {
3660 atomic_set(&sdeb_inject_pending, 0);
3663 ret = check_device_access_params(scp, lba, num, false);
3666 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3667 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3668 ((lba + num) > sdebug_medium_error_start))) {
3669 /* claim unrecoverable read error */
3670 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3671 /* set info field and valid bit for fixed descriptor */
3672 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3673 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3674 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3675 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3676 put_unaligned_be32(ret, scp->sense_buffer + 3);
3678 scsi_set_resid(scp, scsi_bufflen(scp));
3679 return check_condition_result;
3682 sdeb_read_lock(sip);
3685 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3686 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3687 case 1: /* Guard tag error */
3688 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3689 sdeb_read_unlock(sip);
3690 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3691 return check_condition_result;
3692 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3693 sdeb_read_unlock(sip);
3694 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3695 return illegal_condition_result;
3698 case 3: /* Reference tag error */
3699 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3700 sdeb_read_unlock(sip);
3701 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3702 return check_condition_result;
3703 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3704 sdeb_read_unlock(sip);
3705 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3706 return illegal_condition_result;
3712 ret = do_device_access(sip, scp, 0, lba, num, false);
3713 sdeb_read_unlock(sip);
3714 if (unlikely(ret == -1))
3715 return DID_ERROR << 16;
3717 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3719 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3720 atomic_read(&sdeb_inject_pending))) {
3721 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3722 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3723 atomic_set(&sdeb_inject_pending, 0);
3724 return check_condition_result;
3725 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3726 /* Logical block guard check failed */
3727 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3728 atomic_set(&sdeb_inject_pending, 0);
3729 return illegal_condition_result;
3730 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3731 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3732 atomic_set(&sdeb_inject_pending, 0);
3733 return illegal_condition_result;
3739 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3740 unsigned int sectors, u32 ei_lba)
3743 struct t10_pi_tuple *sdt;
3745 sector_t sector = start_sec;
3748 struct sg_mapping_iter diter;
3749 struct sg_mapping_iter piter;
3751 BUG_ON(scsi_sg_count(SCpnt) == 0);
3752 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3754 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3755 scsi_prot_sg_count(SCpnt),
3756 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3757 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3758 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3760 /* For each protection page */
3761 while (sg_miter_next(&piter)) {
3763 if (WARN_ON(!sg_miter_next(&diter))) {
3768 for (ppage_offset = 0; ppage_offset < piter.length;
3769 ppage_offset += sizeof(struct t10_pi_tuple)) {
3770 /* If we're at the end of the current
3771 * data page advance to the next one
3773 if (dpage_offset >= diter.length) {
3774 if (WARN_ON(!sg_miter_next(&diter))) {
3781 sdt = piter.addr + ppage_offset;
3782 daddr = diter.addr + dpage_offset;
3784 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3785 ret = dif_verify(sdt, daddr, sector, ei_lba);
3792 dpage_offset += sdebug_sector_size;
3794 diter.consumed = dpage_offset;
3795 sg_miter_stop(&diter);
3797 sg_miter_stop(&piter);
3799 dif_copy_prot(SCpnt, start_sec, sectors, false);
3806 sg_miter_stop(&diter);
3807 sg_miter_stop(&piter);
3811 static unsigned long lba_to_map_index(sector_t lba)
3813 if (sdebug_unmap_alignment)
3814 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3815 sector_div(lba, sdebug_unmap_granularity);
3819 static sector_t map_index_to_lba(unsigned long index)
3821 sector_t lba = index * sdebug_unmap_granularity;
3823 if (sdebug_unmap_alignment)
3824 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3828 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3832 unsigned int mapped;
3833 unsigned long index;
3836 index = lba_to_map_index(lba);
3837 mapped = test_bit(index, sip->map_storep);
3840 next = find_next_zero_bit(sip->map_storep, map_size, index);
3842 next = find_next_bit(sip->map_storep, map_size, index);
3844 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3849 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3852 sector_t end = lba + len;
3855 unsigned long index = lba_to_map_index(lba);
3857 if (index < map_size)
3858 set_bit(index, sip->map_storep);
3860 lba = map_index_to_lba(index + 1);
3864 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3867 sector_t end = lba + len;
3868 u8 *fsp = sip->storep;
3871 unsigned long index = lba_to_map_index(lba);
3873 if (lba == map_index_to_lba(index) &&
3874 lba + sdebug_unmap_granularity <= end &&
3876 clear_bit(index, sip->map_storep);
3877 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3878 memset(fsp + lba * sdebug_sector_size,
3879 (sdebug_lbprz & 1) ? 0 : 0xff,
3880 sdebug_sector_size *
3881 sdebug_unmap_granularity);
3883 if (sip->dif_storep) {
3884 memset(sip->dif_storep + lba, 0xff,
3885 sizeof(*sip->dif_storep) *
3886 sdebug_unmap_granularity);
3889 lba = map_index_to_lba(index + 1);
3893 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3900 struct sdeb_store_info *sip = devip2sip(devip, true);
3901 u8 *cmd = scp->cmnd;
3906 lba = get_unaligned_be64(cmd + 2);
3907 num = get_unaligned_be32(cmd + 10);
3912 lba = get_unaligned_be32(cmd + 2);
3913 num = get_unaligned_be16(cmd + 7);
3918 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3919 (u32)(cmd[1] & 0x1f) << 16;
3920 num = (0 == cmd[4]) ? 256 : cmd[4];
3925 lba = get_unaligned_be32(cmd + 2);
3926 num = get_unaligned_be32(cmd + 6);
3929 case 0x53: /* XDWRITEREAD(10) */
3931 lba = get_unaligned_be32(cmd + 2);
3932 num = get_unaligned_be16(cmd + 7);
3935 default: /* assume WRITE(32) */
3936 lba = get_unaligned_be64(cmd + 12);
3937 ei_lba = get_unaligned_be32(cmd + 20);
3938 num = get_unaligned_be32(cmd + 28);
3942 if (unlikely(have_dif_prot && check_prot)) {
3943 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3945 mk_sense_invalid_opcode(scp);
3946 return check_condition_result;
3948 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3949 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3950 (cmd[1] & 0xe0) == 0)
3951 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3955 sdeb_write_lock(sip);
3956 ret = check_device_access_params(scp, lba, num, true);
3958 sdeb_write_unlock(sip);
3963 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3964 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3965 case 1: /* Guard tag error */
3966 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3967 sdeb_write_unlock(sip);
3968 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3969 return illegal_condition_result;
3970 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3971 sdeb_write_unlock(sip);
3972 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3973 return check_condition_result;
3976 case 3: /* Reference tag error */
3977 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3978 sdeb_write_unlock(sip);
3979 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3980 return illegal_condition_result;
3981 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3982 sdeb_write_unlock(sip);
3983 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3984 return check_condition_result;
3990 ret = do_device_access(sip, scp, 0, lba, num, true);
3991 if (unlikely(scsi_debug_lbp()))
3992 map_region(sip, lba, num);
3993 /* If ZBC zone then bump its write pointer */
3994 if (sdebug_dev_is_zoned(devip))
3995 zbc_inc_wp(devip, lba, num);
3996 sdeb_write_unlock(sip);
3997 if (unlikely(-1 == ret))
3998 return DID_ERROR << 16;
3999 else if (unlikely(sdebug_verbose &&
4000 (ret < (num * sdebug_sector_size))))
4001 sdev_printk(KERN_INFO, scp->device,
4002 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4003 my_name, num * sdebug_sector_size, ret);
4005 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4006 atomic_read(&sdeb_inject_pending))) {
4007 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4008 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4009 atomic_set(&sdeb_inject_pending, 0);
4010 return check_condition_result;
4011 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4012 /* Logical block guard check failed */
4013 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4014 atomic_set(&sdeb_inject_pending, 0);
4015 return illegal_condition_result;
4016 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4017 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4018 atomic_set(&sdeb_inject_pending, 0);
4019 return illegal_condition_result;
4026 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4027 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4029 static int resp_write_scat(struct scsi_cmnd *scp,
4030 struct sdebug_dev_info *devip)
4032 u8 *cmd = scp->cmnd;
4035 struct sdeb_store_info *sip = devip2sip(devip, true);
4037 u16 lbdof, num_lrd, k;
4038 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4039 u32 lb_size = sdebug_sector_size;
4044 static const u32 lrd_size = 32; /* + parameter list header size */
4046 if (cmd[0] == VARIABLE_LENGTH_CMD) {
4048 wrprotect = (cmd[10] >> 5) & 0x7;
4049 lbdof = get_unaligned_be16(cmd + 12);
4050 num_lrd = get_unaligned_be16(cmd + 16);
4051 bt_len = get_unaligned_be32(cmd + 28);
4052 } else { /* that leaves WRITE SCATTERED(16) */
4054 wrprotect = (cmd[2] >> 5) & 0x7;
4055 lbdof = get_unaligned_be16(cmd + 4);
4056 num_lrd = get_unaligned_be16(cmd + 8);
4057 bt_len = get_unaligned_be32(cmd + 10);
4058 if (unlikely(have_dif_prot)) {
4059 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4061 mk_sense_invalid_opcode(scp);
4062 return illegal_condition_result;
4064 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4065 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4067 sdev_printk(KERN_ERR, scp->device,
4068 "Unprotected WR to DIF device\n");
4071 if ((num_lrd == 0) || (bt_len == 0))
4072 return 0; /* T10 says these do-nothings are not errors */
4075 sdev_printk(KERN_INFO, scp->device,
4076 "%s: %s: LB Data Offset field bad\n",
4078 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4079 return illegal_condition_result;
4081 lbdof_blen = lbdof * lb_size;
4082 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4084 sdev_printk(KERN_INFO, scp->device,
4085 "%s: %s: LBA range descriptors don't fit\n",
4087 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4088 return illegal_condition_result;
4090 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4092 return SCSI_MLQUEUE_HOST_BUSY;
4094 sdev_printk(KERN_INFO, scp->device,
4095 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4096 my_name, __func__, lbdof_blen);
4097 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4099 ret = DID_ERROR << 16;
4103 sdeb_write_lock(sip);
4104 sg_off = lbdof_blen;
4105 /* Spec says Buffer xfer Length field in number of LBs in dout */
4107 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4108 lba = get_unaligned_be64(up + 0);
4109 num = get_unaligned_be32(up + 8);
4111 sdev_printk(KERN_INFO, scp->device,
4112 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
4113 my_name, __func__, k, lba, num, sg_off);
4116 ret = check_device_access_params(scp, lba, num, true);
4118 goto err_out_unlock;
4119 num_by = num * lb_size;
4120 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4122 if ((cum_lb + num) > bt_len) {
4124 sdev_printk(KERN_INFO, scp->device,
4125 "%s: %s: sum of blocks > data provided\n",
4127 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4129 ret = illegal_condition_result;
4130 goto err_out_unlock;
4134 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4135 int prot_ret = prot_verify_write(scp, lba, num,
4139 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4141 ret = illegal_condition_result;
4142 goto err_out_unlock;
4146 ret = do_device_access(sip, scp, sg_off, lba, num, true);
4147 /* If ZBC zone then bump its write pointer */
4148 if (sdebug_dev_is_zoned(devip))
4149 zbc_inc_wp(devip, lba, num);
4150 if (unlikely(scsi_debug_lbp()))
4151 map_region(sip, lba, num);
4152 if (unlikely(-1 == ret)) {
4153 ret = DID_ERROR << 16;
4154 goto err_out_unlock;
4155 } else if (unlikely(sdebug_verbose && (ret < num_by)))
4156 sdev_printk(KERN_INFO, scp->device,
4157 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4158 my_name, num_by, ret);
4160 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4161 atomic_read(&sdeb_inject_pending))) {
4162 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4163 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4164 atomic_set(&sdeb_inject_pending, 0);
4165 ret = check_condition_result;
4166 goto err_out_unlock;
4167 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4168 /* Logical block guard check failed */
4169 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4170 atomic_set(&sdeb_inject_pending, 0);
4171 ret = illegal_condition_result;
4172 goto err_out_unlock;
4173 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4174 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4175 atomic_set(&sdeb_inject_pending, 0);
4176 ret = illegal_condition_result;
4177 goto err_out_unlock;
4185 sdeb_write_unlock(sip);
4191 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4192 u32 ei_lba, bool unmap, bool ndob)
4194 struct scsi_device *sdp = scp->device;
4195 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4196 unsigned long long i;
4198 u32 lb_size = sdebug_sector_size;
4200 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4201 scp->device->hostdata, true);
4205 sdeb_write_lock(sip);
4207 ret = check_device_access_params(scp, lba, num, true);
4209 sdeb_write_unlock(sip);
4213 if (unmap && scsi_debug_lbp()) {
4214 unmap_region(sip, lba, num);
4218 block = do_div(lbaa, sdebug_store_sectors);
4219 /* if ndob then zero 1 logical block, else fetch 1 logical block */
4221 fs1p = fsp + (block * lb_size);
4223 memset(fs1p, 0, lb_size);
4226 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4229 sdeb_write_unlock(sip);
4230 return DID_ERROR << 16;
4231 } else if (sdebug_verbose && !ndob && (ret < lb_size))
4232 sdev_printk(KERN_INFO, scp->device,
4233 "%s: %s: lb size=%u, IO sent=%d bytes\n",
4234 my_name, "write same", lb_size, ret);
4236 /* Copy first sector to remaining blocks */
4237 for (i = 1 ; i < num ; i++) {
4239 block = do_div(lbaa, sdebug_store_sectors);
4240 memmove(fsp + (block * lb_size), fs1p, lb_size);
4242 if (scsi_debug_lbp())
4243 map_region(sip, lba, num);
4244 /* If ZBC zone then bump its write pointer */
4245 if (sdebug_dev_is_zoned(devip))
4246 zbc_inc_wp(devip, lba, num);
4248 sdeb_write_unlock(sip);
4253 static int resp_write_same_10(struct scsi_cmnd *scp,
4254 struct sdebug_dev_info *devip)
4256 u8 *cmd = scp->cmnd;
4263 if (sdebug_lbpws10 == 0) {
4264 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4265 return check_condition_result;
4269 lba = get_unaligned_be32(cmd + 2);
4270 num = get_unaligned_be16(cmd + 7);
4271 if (num > sdebug_write_same_length) {
4272 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4273 return check_condition_result;
4275 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4278 static int resp_write_same_16(struct scsi_cmnd *scp,
4279 struct sdebug_dev_info *devip)
4281 u8 *cmd = scp->cmnd;
4288 if (cmd[1] & 0x8) { /* UNMAP */
4289 if (sdebug_lbpws == 0) {
4290 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4291 return check_condition_result;
4295 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
4297 lba = get_unaligned_be64(cmd + 2);
4298 num = get_unaligned_be32(cmd + 10);
4299 if (num > sdebug_write_same_length) {
4300 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4301 return check_condition_result;
4303 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4306 /* Note the mode field is in the same position as the (lower) service action
4307 * field. For the Report supported operation codes command, SPC-4 suggests
4308 * each mode of this command should be reported separately; for future. */
4309 static int resp_write_buffer(struct scsi_cmnd *scp,
4310 struct sdebug_dev_info *devip)
4312 u8 *cmd = scp->cmnd;
4313 struct scsi_device *sdp = scp->device;
4314 struct sdebug_dev_info *dp;
4317 mode = cmd[1] & 0x1f;
4319 case 0x4: /* download microcode (MC) and activate (ACT) */
4320 /* set UAs on this device only */
4321 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4322 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4324 case 0x5: /* download MC, save and ACT */
4325 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4327 case 0x6: /* download MC with offsets and ACT */
4328 /* set UAs on most devices (LUs) in this target */
4329 list_for_each_entry(dp,
4330 &devip->sdbg_host->dev_info_list,
4332 if (dp->target == sdp->id) {
4333 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4335 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4339 case 0x7: /* download MC with offsets, save, and ACT */
4340 /* set UA on all devices (LUs) in this target */
4341 list_for_each_entry(dp,
4342 &devip->sdbg_host->dev_info_list,
4344 if (dp->target == sdp->id)
4345 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4349 /* do nothing for this command for other mode values */
4355 static int resp_comp_write(struct scsi_cmnd *scp,
4356 struct sdebug_dev_info *devip)
4358 u8 *cmd = scp->cmnd;
4360 struct sdeb_store_info *sip = devip2sip(devip, true);
4363 u32 lb_size = sdebug_sector_size;
4368 lba = get_unaligned_be64(cmd + 2);
4369 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4371 return 0; /* degenerate case, not an error */
4372 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4374 mk_sense_invalid_opcode(scp);
4375 return check_condition_result;
4377 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4378 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4379 (cmd[1] & 0xe0) == 0)
4380 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4382 ret = check_device_access_params(scp, lba, num, false);
4386 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4388 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4390 return check_condition_result;
4393 sdeb_write_lock(sip);
4395 ret = do_dout_fetch(scp, dnum, arr);
4397 retval = DID_ERROR << 16;
4399 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4400 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4401 "indicated=%u, IO sent=%d bytes\n", my_name,
4402 dnum * lb_size, ret);
4403 if (!comp_write_worker(sip, lba, num, arr, false)) {
4404 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4405 retval = check_condition_result;
4408 if (scsi_debug_lbp())
4409 map_region(sip, lba, num);
4411 sdeb_write_unlock(sip);
4416 struct unmap_block_desc {
4422 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4425 struct unmap_block_desc *desc;
4426 struct sdeb_store_info *sip = devip2sip(devip, true);
4427 unsigned int i, payload_len, descriptors;
4430 if (!scsi_debug_lbp())
4431 return 0; /* fib and say its done */
4432 payload_len = get_unaligned_be16(scp->cmnd + 7);
4433 BUG_ON(scsi_bufflen(scp) != payload_len);
4435 descriptors = (payload_len - 8) / 16;
4436 if (descriptors > sdebug_unmap_max_desc) {
4437 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4438 return check_condition_result;
4441 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4443 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4445 return check_condition_result;
4448 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4450 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4451 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4453 desc = (void *)&buf[8];
4455 sdeb_write_lock(sip);
4457 for (i = 0 ; i < descriptors ; i++) {
4458 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4459 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4461 ret = check_device_access_params(scp, lba, num, true);
4465 unmap_region(sip, lba, num);
4471 sdeb_write_unlock(sip);
4477 #define SDEBUG_GET_LBA_STATUS_LEN 32
4479 static int resp_get_lba_status(struct scsi_cmnd *scp,
4480 struct sdebug_dev_info *devip)
4482 u8 *cmd = scp->cmnd;
4484 u32 alloc_len, mapped, num;
4486 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4488 lba = get_unaligned_be64(cmd + 2);
4489 alloc_len = get_unaligned_be32(cmd + 10);
4494 ret = check_device_access_params(scp, lba, 1, false);
4498 if (scsi_debug_lbp()) {
4499 struct sdeb_store_info *sip = devip2sip(devip, true);
4501 mapped = map_state(sip, lba, &num);
4504 /* following just in case virtual_gb changed */
4505 sdebug_capacity = get_sdebug_capacity();
4506 if (sdebug_capacity - lba <= 0xffffffff)
4507 num = sdebug_capacity - lba;
4512 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4513 put_unaligned_be32(20, arr); /* Parameter Data Length */
4514 put_unaligned_be64(lba, arr + 8); /* LBA */
4515 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4516 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4518 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4521 static int resp_sync_cache(struct scsi_cmnd *scp,
4522 struct sdebug_dev_info *devip)
4527 u8 *cmd = scp->cmnd;
4529 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4530 lba = get_unaligned_be32(cmd + 2);
4531 num_blocks = get_unaligned_be16(cmd + 7);
4532 } else { /* SYNCHRONIZE_CACHE(16) */
4533 lba = get_unaligned_be64(cmd + 2);
4534 num_blocks = get_unaligned_be32(cmd + 10);
4536 if (lba + num_blocks > sdebug_capacity) {
4537 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4538 return check_condition_result;
4540 if (!write_since_sync || (cmd[1] & 0x2))
4541 res = SDEG_RES_IMMED_MASK;
4542 else /* delay if write_since_sync and IMMED clear */
4543 write_since_sync = false;
4548 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4549 * CONDITION MET if the specified blocks will/have fitted in the cache, and
4550 * a GOOD status otherwise. Model a disk with a big cache and yield
4551 * CONDITION MET. Actually tries to bring range in main memory into the
4552 * cache associated with the CPU(s).
4554 static int resp_pre_fetch(struct scsi_cmnd *scp,
4555 struct sdebug_dev_info *devip)
4559 u64 block, rest = 0;
4561 u8 *cmd = scp->cmnd;
4562 struct sdeb_store_info *sip = devip2sip(devip, true);
4563 u8 *fsp = sip->storep;
4565 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4566 lba = get_unaligned_be32(cmd + 2);
4567 nblks = get_unaligned_be16(cmd + 7);
4568 } else { /* PRE-FETCH(16) */
4569 lba = get_unaligned_be64(cmd + 2);
4570 nblks = get_unaligned_be32(cmd + 10);
4572 if (lba + nblks > sdebug_capacity) {
4573 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4574 return check_condition_result;
4578 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4579 block = do_div(lba, sdebug_store_sectors);
4580 if (block + nblks > sdebug_store_sectors)
4581 rest = block + nblks - sdebug_store_sectors;
4583 /* Try to bring the PRE-FETCH range into CPU's cache */
4584 sdeb_read_lock(sip);
4585 prefetch_range(fsp + (sdebug_sector_size * block),
4586 (nblks - rest) * sdebug_sector_size);
4588 prefetch_range(fsp, rest * sdebug_sector_size);
4589 sdeb_read_unlock(sip);
4592 res = SDEG_RES_IMMED_MASK;
4593 return res | condition_met_result;
4596 #define RL_BUCKET_ELEMS 8
4598 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4599 * (W-LUN), the normal Linux scanning logic does not associate it with a
4600 * device (e.g. /dev/sg7). The following magic will make that association:
4601 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4602 * where <n> is a host number. If there are multiple targets in a host then
4603 * the above will associate a W-LUN to each target. To only get a W-LUN
4604 * for target 2, then use "echo '- 2 49409' > scan" .
4606 static int resp_report_luns(struct scsi_cmnd *scp,
4607 struct sdebug_dev_info *devip)
4609 unsigned char *cmd = scp->cmnd;
4610 unsigned int alloc_len;
4611 unsigned char select_report;
4613 struct scsi_lun *lun_p;
4614 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4615 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4616 unsigned int wlun_cnt; /* report luns W-LUN count */
4617 unsigned int tlun_cnt; /* total LUN count */
4618 unsigned int rlen; /* response length (in bytes) */
4620 unsigned int off_rsp = 0;
4621 const int sz_lun = sizeof(struct scsi_lun);
4623 clear_luns_changed_on_target(devip);
4625 select_report = cmd[2];
4626 alloc_len = get_unaligned_be32(cmd + 6);
4628 if (alloc_len < 4) {
4629 pr_err("alloc len too small %d\n", alloc_len);
4630 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4631 return check_condition_result;
4634 switch (select_report) {
4635 case 0: /* all LUNs apart from W-LUNs */
4636 lun_cnt = sdebug_max_luns;
4639 case 1: /* only W-LUNs */
4643 case 2: /* all LUNs */
4644 lun_cnt = sdebug_max_luns;
4647 case 0x10: /* only administrative LUs */
4648 case 0x11: /* see SPC-5 */
4649 case 0x12: /* only subsiduary LUs owned by referenced LU */
4651 pr_debug("select report invalid %d\n", select_report);
4652 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4653 return check_condition_result;
4656 if (sdebug_no_lun_0 && (lun_cnt > 0))
4659 tlun_cnt = lun_cnt + wlun_cnt;
4660 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4661 scsi_set_resid(scp, scsi_bufflen(scp));
4662 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4663 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4665 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4666 lun = sdebug_no_lun_0 ? 1 : 0;
4667 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4668 memset(arr, 0, sizeof(arr));
4669 lun_p = (struct scsi_lun *)&arr[0];
4671 put_unaligned_be32(rlen, &arr[0]);
4675 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4676 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4678 int_to_scsilun(lun++, lun_p);
4679 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4680 lun_p->scsi_lun[0] |= 0x40;
4682 if (j < RL_BUCKET_ELEMS)
4685 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4691 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4695 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4699 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4701 bool is_bytchk3 = false;
4704 u32 vnum, a_num, off;
4705 const u32 lb_size = sdebug_sector_size;
4708 u8 *cmd = scp->cmnd;
4709 struct sdeb_store_info *sip = devip2sip(devip, true);
4711 bytchk = (cmd[1] >> 1) & 0x3;
4713 return 0; /* always claim internal verify okay */
4714 } else if (bytchk == 2) {
4715 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4716 return check_condition_result;
4717 } else if (bytchk == 3) {
4718 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4722 lba = get_unaligned_be64(cmd + 2);
4723 vnum = get_unaligned_be32(cmd + 10);
4725 case VERIFY: /* is VERIFY(10) */
4726 lba = get_unaligned_be32(cmd + 2);
4727 vnum = get_unaligned_be16(cmd + 7);
4730 mk_sense_invalid_opcode(scp);
4731 return check_condition_result;
4734 return 0; /* not an error */
4735 a_num = is_bytchk3 ? 1 : vnum;
4736 /* Treat following check like one for read (i.e. no write) access */
4737 ret = check_device_access_params(scp, lba, a_num, false);
4741 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4743 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4745 return check_condition_result;
4747 /* Not changing store, so only need read access */
4748 sdeb_read_lock(sip);
4750 ret = do_dout_fetch(scp, a_num, arr);
4752 ret = DID_ERROR << 16;
4754 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4755 sdev_printk(KERN_INFO, scp->device,
4756 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4757 my_name, __func__, a_num * lb_size, ret);
4760 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4761 memcpy(arr + off, arr, lb_size);
4764 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4765 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4766 ret = check_condition_result;
4770 sdeb_read_unlock(sip);
4775 #define RZONES_DESC_HD 64
4777 /* Report zones depending on start LBA and reporting options */
4778 static int resp_report_zones(struct scsi_cmnd *scp,
4779 struct sdebug_dev_info *devip)
4781 unsigned int rep_max_zones, nrz = 0;
4783 u32 alloc_len, rep_opts, rep_len;
4786 u8 *arr = NULL, *desc;
4787 u8 *cmd = scp->cmnd;
4788 struct sdeb_zone_state *zsp = NULL;
4789 struct sdeb_store_info *sip = devip2sip(devip, false);
4791 if (!sdebug_dev_is_zoned(devip)) {
4792 mk_sense_invalid_opcode(scp);
4793 return check_condition_result;
4795 zs_lba = get_unaligned_be64(cmd + 2);
4796 alloc_len = get_unaligned_be32(cmd + 10);
4798 return 0; /* not an error */
4799 rep_opts = cmd[14] & 0x3f;
4800 partial = cmd[14] & 0x80;
4802 if (zs_lba >= sdebug_capacity) {
4803 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4804 return check_condition_result;
4807 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4809 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4811 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4813 return check_condition_result;
4816 sdeb_read_lock(sip);
4819 for (lba = zs_lba; lba < sdebug_capacity;
4820 lba = zsp->z_start + zsp->z_size) {
4821 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4823 zsp = zbc_zone(devip, lba);
4830 if (zsp->z_cond != ZC1_EMPTY)
4834 /* Implicit open zones */
4835 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4839 /* Explicit open zones */
4840 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4845 if (zsp->z_cond != ZC4_CLOSED)
4850 if (zsp->z_cond != ZC5_FULL)
4857 * Read-only, offline, reset WP recommended are
4858 * not emulated: no zones to report;
4862 /* non-seq-resource set */
4863 if (!zsp->z_non_seq_resource)
4867 /* All zones except gap zones. */
4868 if (zbc_zone_is_gap(zsp))
4872 /* Not write pointer (conventional) zones */
4873 if (zbc_zone_is_seq(zsp))
4877 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4878 INVALID_FIELD_IN_CDB, 0);
4879 ret = check_condition_result;
4883 if (nrz < rep_max_zones) {
4884 /* Fill zone descriptor */
4885 desc[0] = zsp->z_type;
4886 desc[1] = zsp->z_cond << 4;
4887 if (zsp->z_non_seq_resource)
4889 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4890 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4891 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4895 if (partial && nrz >= rep_max_zones)
4902 /* Zone list length. */
4903 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4905 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4906 /* Zone starting LBA granularity. */
4907 if (devip->zcap < devip->zsize)
4908 put_unaligned_be64(devip->zsize, arr + 16);
4910 rep_len = (unsigned long)desc - (unsigned long)arr;
4911 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4914 sdeb_read_unlock(sip);
4919 /* Logic transplanted from tcmu-runner, file_zbc.c */
4920 static void zbc_open_all(struct sdebug_dev_info *devip)
4922 struct sdeb_zone_state *zsp = &devip->zstate[0];
4925 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4926 if (zsp->z_cond == ZC4_CLOSED)
4927 zbc_open_zone(devip, &devip->zstate[i], true);
4931 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4935 enum sdebug_z_cond zc;
4936 u8 *cmd = scp->cmnd;
4937 struct sdeb_zone_state *zsp;
4938 bool all = cmd[14] & 0x01;
4939 struct sdeb_store_info *sip = devip2sip(devip, false);
4941 if (!sdebug_dev_is_zoned(devip)) {
4942 mk_sense_invalid_opcode(scp);
4943 return check_condition_result;
4946 sdeb_write_lock(sip);
4949 /* Check if all closed zones can be open */
4950 if (devip->max_open &&
4951 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4952 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4954 res = check_condition_result;
4957 /* Open all closed zones */
4958 zbc_open_all(devip);
4962 /* Open the specified zone */
4963 z_id = get_unaligned_be64(cmd + 2);
4964 if (z_id >= sdebug_capacity) {
4965 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4966 res = check_condition_result;
4970 zsp = zbc_zone(devip, z_id);
4971 if (z_id != zsp->z_start) {
4972 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4973 res = check_condition_result;
4976 if (zbc_zone_is_conv(zsp)) {
4977 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4978 res = check_condition_result;
4983 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4986 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4987 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4989 res = check_condition_result;
4993 zbc_open_zone(devip, zsp, true);
4995 sdeb_write_unlock(sip);
4999 static void zbc_close_all(struct sdebug_dev_info *devip)
5003 for (i = 0; i < devip->nr_zones; i++)
5004 zbc_close_zone(devip, &devip->zstate[i]);
5007 static int resp_close_zone(struct scsi_cmnd *scp,
5008 struct sdebug_dev_info *devip)
5012 u8 *cmd = scp->cmnd;
5013 struct sdeb_zone_state *zsp;
5014 bool all = cmd[14] & 0x01;
5015 struct sdeb_store_info *sip = devip2sip(devip, false);
5017 if (!sdebug_dev_is_zoned(devip)) {
5018 mk_sense_invalid_opcode(scp);
5019 return check_condition_result;
5022 sdeb_write_lock(sip);
5025 zbc_close_all(devip);
5029 /* Close specified zone */
5030 z_id = get_unaligned_be64(cmd + 2);
5031 if (z_id >= sdebug_capacity) {
5032 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5033 res = check_condition_result;
5037 zsp = zbc_zone(devip, z_id);
5038 if (z_id != zsp->z_start) {
5039 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5040 res = check_condition_result;
5043 if (zbc_zone_is_conv(zsp)) {
5044 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5045 res = check_condition_result;
5049 zbc_close_zone(devip, zsp);
5051 sdeb_write_unlock(sip);
5055 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5056 struct sdeb_zone_state *zsp, bool empty)
5058 enum sdebug_z_cond zc = zsp->z_cond;
5060 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5061 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5062 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5063 zbc_close_zone(devip, zsp);
5064 if (zsp->z_cond == ZC4_CLOSED)
5066 zsp->z_wp = zsp->z_start + zsp->z_size;
5067 zsp->z_cond = ZC5_FULL;
5071 static void zbc_finish_all(struct sdebug_dev_info *devip)
5075 for (i = 0; i < devip->nr_zones; i++)
5076 zbc_finish_zone(devip, &devip->zstate[i], false);
5079 static int resp_finish_zone(struct scsi_cmnd *scp,
5080 struct sdebug_dev_info *devip)
5082 struct sdeb_zone_state *zsp;
5085 u8 *cmd = scp->cmnd;
5086 bool all = cmd[14] & 0x01;
5087 struct sdeb_store_info *sip = devip2sip(devip, false);
5089 if (!sdebug_dev_is_zoned(devip)) {
5090 mk_sense_invalid_opcode(scp);
5091 return check_condition_result;
5094 sdeb_write_lock(sip);
5097 zbc_finish_all(devip);
5101 /* Finish the specified zone */
5102 z_id = get_unaligned_be64(cmd + 2);
5103 if (z_id >= sdebug_capacity) {
5104 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5105 res = check_condition_result;
5109 zsp = zbc_zone(devip, z_id);
5110 if (z_id != zsp->z_start) {
5111 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5112 res = check_condition_result;
5115 if (zbc_zone_is_conv(zsp)) {
5116 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5117 res = check_condition_result;
5121 zbc_finish_zone(devip, zsp, true);
5123 sdeb_write_unlock(sip);
5127 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5128 struct sdeb_zone_state *zsp)
5130 enum sdebug_z_cond zc;
5131 struct sdeb_store_info *sip = devip2sip(devip, false);
5133 if (!zbc_zone_is_seq(zsp))
5137 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5138 zbc_close_zone(devip, zsp);
5140 if (zsp->z_cond == ZC4_CLOSED)
5143 if (zsp->z_wp > zsp->z_start)
5144 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5145 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5147 zsp->z_non_seq_resource = false;
5148 zsp->z_wp = zsp->z_start;
5149 zsp->z_cond = ZC1_EMPTY;
5152 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5156 for (i = 0; i < devip->nr_zones; i++)
5157 zbc_rwp_zone(devip, &devip->zstate[i]);
5160 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5162 struct sdeb_zone_state *zsp;
5165 u8 *cmd = scp->cmnd;
5166 bool all = cmd[14] & 0x01;
5167 struct sdeb_store_info *sip = devip2sip(devip, false);
5169 if (!sdebug_dev_is_zoned(devip)) {
5170 mk_sense_invalid_opcode(scp);
5171 return check_condition_result;
5174 sdeb_write_lock(sip);
5181 z_id = get_unaligned_be64(cmd + 2);
5182 if (z_id >= sdebug_capacity) {
5183 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5184 res = check_condition_result;
5188 zsp = zbc_zone(devip, z_id);
5189 if (z_id != zsp->z_start) {
5190 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5191 res = check_condition_result;
5194 if (zbc_zone_is_conv(zsp)) {
5195 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5196 res = check_condition_result;
5200 zbc_rwp_zone(devip, zsp);
5202 sdeb_write_unlock(sip);
5206 static u32 get_tag(struct scsi_cmnd *cmnd)
5208 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5211 /* Queued (deferred) command completions converge here. */
5212 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5214 struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5215 unsigned long flags;
5216 struct scsi_cmnd *scp = sqcp->scmd;
5217 struct sdebug_scsi_cmd *sdsc;
5220 if (sdebug_statistics) {
5221 atomic_inc(&sdebug_completions);
5222 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5223 atomic_inc(&sdebug_miss_cpus);
5227 pr_err("scmd=NULL\n");
5231 sdsc = scsi_cmd_priv(scp);
5232 spin_lock_irqsave(&sdsc->lock, flags);
5233 aborted = sd_dp->aborted;
5234 if (unlikely(aborted))
5235 sd_dp->aborted = false;
5236 ASSIGN_QUEUED_CMD(scp, NULL);
5238 spin_unlock_irqrestore(&sdsc->lock, flags);
5241 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5242 blk_abort_request(scsi_cmd_to_rq(scp));
5246 scsi_done(scp); /* callback to mid level */
5248 sdebug_free_queued_cmd(sqcp);
5251 /* When high resolution timer goes off this function is called. */
5252 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5254 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5256 sdebug_q_cmd_complete(sd_dp);
5257 return HRTIMER_NORESTART;
5260 /* When work queue schedules work, it calls this function. */
5261 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5263 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5265 sdebug_q_cmd_complete(sd_dp);
5268 static bool got_shared_uuid;
5269 static uuid_t shared_uuid;
5271 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5273 struct sdeb_zone_state *zsp;
5274 sector_t capacity = get_sdebug_capacity();
5275 sector_t conv_capacity;
5276 sector_t zstart = 0;
5280 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5281 * a zone size allowing for at least 4 zones on the device. Otherwise,
5282 * use the specified zone size checking that at least 2 zones can be
5283 * created for the device.
5285 if (!sdeb_zbc_zone_size_mb) {
5286 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5287 >> ilog2(sdebug_sector_size);
5288 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5290 if (devip->zsize < 2) {
5291 pr_err("Device capacity too small\n");
5295 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5296 pr_err("Zone size is not a power of 2\n");
5299 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5300 >> ilog2(sdebug_sector_size);
5301 if (devip->zsize >= capacity) {
5302 pr_err("Zone size too large for device capacity\n");
5307 devip->zsize_shift = ilog2(devip->zsize);
5308 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5310 if (sdeb_zbc_zone_cap_mb == 0) {
5311 devip->zcap = devip->zsize;
5313 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5314 ilog2(sdebug_sector_size);
5315 if (devip->zcap > devip->zsize) {
5316 pr_err("Zone capacity too large\n");
5321 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5322 if (conv_capacity >= capacity) {
5323 pr_err("Number of conventional zones too large\n");
5326 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5327 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5329 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5331 /* Add gap zones if zone capacity is smaller than the zone size */
5332 if (devip->zcap < devip->zsize)
5333 devip->nr_zones += devip->nr_seq_zones;
5336 /* zbc_max_open_zones can be 0, meaning "not reported" */
5337 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5338 devip->max_open = (devip->nr_zones - 1) / 2;
5340 devip->max_open = sdeb_zbc_max_open;
5343 devip->zstate = kcalloc(devip->nr_zones,
5344 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5348 for (i = 0; i < devip->nr_zones; i++) {
5349 zsp = &devip->zstate[i];
5351 zsp->z_start = zstart;
5353 if (i < devip->nr_conv_zones) {
5354 zsp->z_type = ZBC_ZTYPE_CNV;
5355 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5356 zsp->z_wp = (sector_t)-1;
5358 min_t(u64, devip->zsize, capacity - zstart);
5359 } else if ((zstart & (devip->zsize - 1)) == 0) {
5361 zsp->z_type = ZBC_ZTYPE_SWR;
5363 zsp->z_type = ZBC_ZTYPE_SWP;
5364 zsp->z_cond = ZC1_EMPTY;
5365 zsp->z_wp = zsp->z_start;
5367 min_t(u64, devip->zcap, capacity - zstart);
5369 zsp->z_type = ZBC_ZTYPE_GAP;
5370 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5371 zsp->z_wp = (sector_t)-1;
5372 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5376 WARN_ON_ONCE((int)zsp->z_size <= 0);
5377 zstart += zsp->z_size;
5383 static struct sdebug_dev_info *sdebug_device_create(
5384 struct sdebug_host_info *sdbg_host, gfp_t flags)
5386 struct sdebug_dev_info *devip;
5388 devip = kzalloc(sizeof(*devip), flags);
5390 if (sdebug_uuid_ctl == 1)
5391 uuid_gen(&devip->lu_name);
5392 else if (sdebug_uuid_ctl == 2) {
5393 if (got_shared_uuid)
5394 devip->lu_name = shared_uuid;
5396 uuid_gen(&shared_uuid);
5397 got_shared_uuid = true;
5398 devip->lu_name = shared_uuid;
5401 devip->sdbg_host = sdbg_host;
5402 if (sdeb_zbc_in_use) {
5403 devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5404 if (sdebug_device_create_zones(devip)) {
5409 devip->zoned = false;
5411 devip->create_ts = ktime_get_boottime();
5412 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5413 spin_lock_init(&devip->list_lock);
5414 INIT_LIST_HEAD(&devip->inject_err_list);
5415 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5420 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5422 struct sdebug_host_info *sdbg_host;
5423 struct sdebug_dev_info *open_devip = NULL;
5424 struct sdebug_dev_info *devip;
5426 sdbg_host = shost_to_sdebug_host(sdev->host);
5428 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5429 if ((devip->used) && (devip->channel == sdev->channel) &&
5430 (devip->target == sdev->id) &&
5431 (devip->lun == sdev->lun))
5434 if ((!devip->used) && (!open_devip))
5438 if (!open_devip) { /* try and make a new one */
5439 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5441 pr_err("out of memory at line %d\n", __LINE__);
5446 open_devip->channel = sdev->channel;
5447 open_devip->target = sdev->id;
5448 open_devip->lun = sdev->lun;
5449 open_devip->sdbg_host = sdbg_host;
5450 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5451 open_devip->used = true;
5455 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5458 pr_info("slave_alloc <%u %u %u %llu>\n",
5459 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5464 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5466 struct sdebug_dev_info *devip =
5467 (struct sdebug_dev_info *)sdp->hostdata;
5468 struct dentry *dentry;
5471 pr_info("slave_configure <%u %u %u %llu>\n",
5472 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5473 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5474 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5475 if (devip == NULL) {
5476 devip = find_build_dev_info(sdp);
5478 return 1; /* no resources, will be marked offline */
5480 sdp->hostdata = devip;
5482 sdp->no_uld_attach = 1;
5483 config_cdb_len(sdp);
5485 if (sdebug_allow_restart)
5486 sdp->allow_restart = 1;
5488 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5489 sdebug_debugfs_root);
5490 if (IS_ERR_OR_NULL(devip->debugfs_entry))
5491 pr_info("%s: failed to create debugfs directory for device %s\n",
5492 __func__, dev_name(&sdp->sdev_gendev));
5494 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5495 &sdebug_error_fops);
5496 if (IS_ERR_OR_NULL(dentry))
5497 pr_info("%s: failed to create error file for device %s\n",
5498 __func__, dev_name(&sdp->sdev_gendev));
5503 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5505 struct sdebug_dev_info *devip =
5506 (struct sdebug_dev_info *)sdp->hostdata;
5507 struct sdebug_err_inject *err;
5510 pr_info("slave_destroy <%u %u %u %llu>\n",
5511 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5516 spin_lock(&devip->list_lock);
5517 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5518 list_del_rcu(&err->list);
5519 call_rcu(&err->rcu, sdebug_err_free);
5521 spin_unlock(&devip->list_lock);
5523 debugfs_remove(devip->debugfs_entry);
5525 /* make this slot available for re-use */
5526 devip->used = false;
5527 sdp->hostdata = NULL;
5530 /* Returns true if we require the queued memory to be freed by the caller. */
5531 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5532 enum sdeb_defer_type defer_t)
5534 if (defer_t == SDEB_DEFER_HRT) {
5535 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5538 case 0: /* Not active, it must have already run */
5539 case -1: /* -1 It's executing the CB */
5541 case 1: /* Was active, we've now cancelled */
5545 } else if (defer_t == SDEB_DEFER_WQ) {
5546 /* Cancel if pending */
5547 if (cancel_work_sync(&sd_dp->ew.work))
5549 /* Was not pending, so it must have run */
5551 } else if (defer_t == SDEB_DEFER_POLL) {
5559 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5561 enum sdeb_defer_type l_defer_t;
5562 struct sdebug_defer *sd_dp;
5563 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5564 struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5566 lockdep_assert_held(&sdsc->lock);
5570 sd_dp = &sqcp->sd_dp;
5571 l_defer_t = READ_ONCE(sd_dp->defer_t);
5572 ASSIGN_QUEUED_CMD(cmnd, NULL);
5574 if (stop_qc_helper(sd_dp, l_defer_t))
5575 sdebug_free_queued_cmd(sqcp);
5581 * Called from scsi_debug_abort() only, which is for timed-out cmd.
5583 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5585 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5586 unsigned long flags;
5589 spin_lock_irqsave(&sdsc->lock, flags);
5590 res = scsi_debug_stop_cmnd(cmnd);
5591 spin_unlock_irqrestore(&sdsc->lock, flags);
5597 * All we can do is set the cmnd as internally aborted and wait for it to
5598 * finish. We cannot call scsi_done() as normal completion path may do that.
5600 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5602 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5607 /* Deletes (stops) timers or work queues of all queued commands */
5608 static void stop_all_queued(void)
5610 struct sdebug_host_info *sdhp;
5612 mutex_lock(&sdebug_host_list_mutex);
5613 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5614 struct Scsi_Host *shost = sdhp->shost;
5616 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5618 mutex_unlock(&sdebug_host_list_mutex);
5621 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5623 struct scsi_device *sdp = cmnd->device;
5624 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5625 struct sdebug_err_inject *err;
5626 unsigned char *cmd = cmnd->cmnd;
5633 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5634 if (err->type == ERR_ABORT_CMD_FAILED &&
5635 (err->cmd == cmd[0] || err->cmd == 0xff)) {
5649 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5651 bool ok = scsi_debug_abort_cmnd(SCpnt);
5652 u8 *cmd = SCpnt->cmnd;
5657 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5658 sdev_printk(KERN_INFO, SCpnt->device,
5659 "%s: command%s found\n", __func__,
5662 if (sdebug_fail_abort(SCpnt)) {
5663 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5671 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5673 struct scsi_device *sdp = data;
5674 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5676 if (scmd->device == sdp)
5677 scsi_debug_abort_cmnd(scmd);
5682 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5683 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5685 struct Scsi_Host *shost = sdp->host;
5687 blk_mq_tagset_busy_iter(&shost->tag_set,
5688 scsi_debug_stop_all_queued_iter, sdp);
5691 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5693 struct scsi_device *sdp = cmnd->device;
5694 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5695 struct sdebug_err_inject *err;
5696 unsigned char *cmd = cmnd->cmnd;
5703 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5704 if (err->type == ERR_LUN_RESET_FAILED &&
5705 (err->cmd == cmd[0] || err->cmd == 0xff)) {
5719 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5721 struct scsi_device *sdp = SCpnt->device;
5722 struct sdebug_dev_info *devip = sdp->hostdata;
5723 u8 *cmd = SCpnt->cmnd;
5728 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5729 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5731 scsi_debug_stop_all_queued(sdp);
5733 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5735 if (sdebug_fail_lun_reset(SCpnt)) {
5736 scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5743 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5745 struct scsi_target *starget = scsi_target(cmnd->device);
5746 struct sdebug_target_info *targetip =
5747 (struct sdebug_target_info *)starget->hostdata;
5750 return targetip->reset_fail;
5755 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5757 struct scsi_device *sdp = SCpnt->device;
5758 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5759 struct sdebug_dev_info *devip;
5760 u8 *cmd = SCpnt->cmnd;
5764 ++num_target_resets;
5765 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5766 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5768 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5769 if (devip->target == sdp->id) {
5770 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5775 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5776 sdev_printk(KERN_INFO, sdp,
5777 "%s: %d device(s) found in target\n", __func__, k);
5779 if (sdebug_fail_target_reset(SCpnt)) {
5780 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5788 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5790 struct scsi_device *sdp = SCpnt->device;
5791 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5792 struct sdebug_dev_info *devip;
5797 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5798 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5800 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5801 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5805 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5806 sdev_printk(KERN_INFO, sdp,
5807 "%s: %d device(s) found in host\n", __func__, k);
5811 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5813 struct sdebug_host_info *sdbg_host;
5814 struct sdebug_dev_info *devip;
5818 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5819 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5820 mutex_lock(&sdebug_host_list_mutex);
5821 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5822 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5824 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5828 mutex_unlock(&sdebug_host_list_mutex);
5830 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5831 sdev_printk(KERN_INFO, SCpnt->device,
5832 "%s: %d device(s) found\n", __func__, k);
5836 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5838 struct msdos_partition *pp;
5839 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5840 int sectors_per_part, num_sectors, k;
5841 int heads_by_sects, start_sec, end_sec;
5843 /* assume partition table already zeroed */
5844 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5846 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5847 sdebug_num_parts = SDEBUG_MAX_PARTS;
5848 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5850 num_sectors = (int)get_sdebug_capacity();
5851 sectors_per_part = (num_sectors - sdebug_sectors_per)
5853 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5854 starts[0] = sdebug_sectors_per;
5855 max_part_secs = sectors_per_part;
5856 for (k = 1; k < sdebug_num_parts; ++k) {
5857 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5859 if (starts[k] - starts[k - 1] < max_part_secs)
5860 max_part_secs = starts[k] - starts[k - 1];
5862 starts[sdebug_num_parts] = num_sectors;
5863 starts[sdebug_num_parts + 1] = 0;
5865 ramp[510] = 0x55; /* magic partition markings */
5867 pp = (struct msdos_partition *)(ramp + 0x1be);
5868 for (k = 0; starts[k + 1]; ++k, ++pp) {
5869 start_sec = starts[k];
5870 end_sec = starts[k] + max_part_secs - 1;
5873 pp->cyl = start_sec / heads_by_sects;
5874 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5875 / sdebug_sectors_per;
5876 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5878 pp->end_cyl = end_sec / heads_by_sects;
5879 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5880 / sdebug_sectors_per;
5881 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5883 pp->start_sect = cpu_to_le32(start_sec);
5884 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5885 pp->sys_ind = 0x83; /* plain Linux partition */
5889 static void block_unblock_all_queues(bool block)
5891 struct sdebug_host_info *sdhp;
5893 lockdep_assert_held(&sdebug_host_list_mutex);
5895 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5896 struct Scsi_Host *shost = sdhp->shost;
5899 scsi_block_requests(shost);
5901 scsi_unblock_requests(shost);
5905 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5906 * commands will be processed normally before triggers occur.
5908 static void tweak_cmnd_count(void)
5912 modulo = abs(sdebug_every_nth);
5916 mutex_lock(&sdebug_host_list_mutex);
5917 block_unblock_all_queues(true);
5918 count = atomic_read(&sdebug_cmnd_count);
5919 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5920 block_unblock_all_queues(false);
5921 mutex_unlock(&sdebug_host_list_mutex);
5924 static void clear_queue_stats(void)
5926 atomic_set(&sdebug_cmnd_count, 0);
5927 atomic_set(&sdebug_completions, 0);
5928 atomic_set(&sdebug_miss_cpus, 0);
5929 atomic_set(&sdebug_a_tsf, 0);
5932 static bool inject_on_this_cmd(void)
5934 if (sdebug_every_nth == 0)
5936 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5939 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5942 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5945 kmem_cache_free(queued_cmd_cache, sqcp);
5948 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5950 struct sdebug_queued_cmd *sqcp;
5951 struct sdebug_defer *sd_dp;
5953 sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5957 sd_dp = &sqcp->sd_dp;
5959 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5960 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5961 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5968 /* Complete the processing of the thread that queued a SCSI command to this
5969 * driver. It either completes the command by calling cmnd_done() or
5970 * schedules a hr timer or work queue then returns 0. Returns
5971 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5973 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5975 int (*pfp)(struct scsi_cmnd *,
5976 struct sdebug_dev_info *),
5977 int delta_jiff, int ndelay)
5979 struct request *rq = scsi_cmd_to_rq(cmnd);
5980 bool polled = rq->cmd_flags & REQ_POLLED;
5981 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5982 unsigned long flags;
5983 u64 ns_from_boot = 0;
5984 struct sdebug_queued_cmd *sqcp;
5985 struct scsi_device *sdp;
5986 struct sdebug_defer *sd_dp;
5988 if (unlikely(devip == NULL)) {
5989 if (scsi_result == 0)
5990 scsi_result = DID_NO_CONNECT << 16;
5991 goto respond_in_thread;
5995 if (delta_jiff == 0)
5996 goto respond_in_thread;
5999 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
6000 (scsi_result == 0))) {
6001 int num_in_q = scsi_device_busy(sdp);
6002 int qdepth = cmnd->device->queue_depth;
6004 if ((num_in_q == qdepth) &&
6005 (atomic_inc_return(&sdebug_a_tsf) >=
6006 abs(sdebug_every_nth))) {
6007 atomic_set(&sdebug_a_tsf, 0);
6008 scsi_result = device_qfull_result;
6010 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6011 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6012 __func__, num_in_q);
6016 sqcp = sdebug_alloc_queued_cmd(cmnd);
6018 pr_err("%s no alloc\n", __func__);
6019 return SCSI_MLQUEUE_HOST_BUSY;
6021 sd_dp = &sqcp->sd_dp;
6024 ns_from_boot = ktime_get_boottime_ns();
6026 /* one of the resp_*() response functions is called here */
6027 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6028 if (cmnd->result & SDEG_RES_IMMED_MASK) {
6029 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6030 delta_jiff = ndelay = 0;
6032 if (cmnd->result == 0 && scsi_result != 0)
6033 cmnd->result = scsi_result;
6034 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6035 if (atomic_read(&sdeb_inject_pending)) {
6036 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6037 atomic_set(&sdeb_inject_pending, 0);
6038 cmnd->result = check_condition_result;
6042 if (unlikely(sdebug_verbose && cmnd->result))
6043 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6044 __func__, cmnd->result);
6046 if (delta_jiff > 0 || ndelay > 0) {
6049 if (delta_jiff > 0) {
6050 u64 ns = jiffies_to_nsecs(delta_jiff);
6052 if (sdebug_random && ns < U32_MAX) {
6053 ns = get_random_u32_below((u32)ns);
6054 } else if (sdebug_random) {
6055 ns >>= 12; /* scale to 4 usec precision */
6056 if (ns < U32_MAX) /* over 4 hours max */
6057 ns = get_random_u32_below((u32)ns);
6060 kt = ns_to_ktime(ns);
6061 } else { /* ndelay has a 4.2 second max */
6062 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6064 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6065 u64 d = ktime_get_boottime_ns() - ns_from_boot;
6067 if (kt <= d) { /* elapsed duration >= kt */
6068 /* call scsi_done() from this thread */
6069 sdebug_free_queued_cmd(sqcp);
6073 /* otherwise reduce kt by elapsed time */
6077 if (sdebug_statistics)
6078 sd_dp->issuing_cpu = raw_smp_processor_id();
6080 spin_lock_irqsave(&sdsc->lock, flags);
6081 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6082 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6083 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6084 spin_unlock_irqrestore(&sdsc->lock, flags);
6086 /* schedule the invocation of scsi_done() for a later time */
6087 spin_lock_irqsave(&sdsc->lock, flags);
6088 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6089 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6090 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6092 * The completion handler will try to grab sqcp->lock,
6093 * so there is no chance that the completion handler
6094 * will call scsi_done() until we release the lock
6095 * here (so ok to keep referencing sdsc).
6097 spin_unlock_irqrestore(&sdsc->lock, flags);
6099 } else { /* jdelay < 0, use work queue */
6100 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6101 atomic_read(&sdeb_inject_pending))) {
6102 sd_dp->aborted = true;
6103 atomic_set(&sdeb_inject_pending, 0);
6104 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6105 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6108 if (sdebug_statistics)
6109 sd_dp->issuing_cpu = raw_smp_processor_id();
6111 spin_lock_irqsave(&sdsc->lock, flags);
6112 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6113 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6114 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6115 spin_unlock_irqrestore(&sdsc->lock, flags);
6117 spin_lock_irqsave(&sdsc->lock, flags);
6118 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6119 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6120 schedule_work(&sd_dp->ew.work);
6121 spin_unlock_irqrestore(&sdsc->lock, flags);
6127 respond_in_thread: /* call back to mid-layer using invocation thread */
6128 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6129 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6130 if (cmnd->result == 0 && scsi_result != 0)
6131 cmnd->result = scsi_result;
6136 /* Note: The following macros create attribute files in the
6137 /sys/module/scsi_debug/parameters directory. Unfortunately this
6138 driver is unaware of a change and cannot trigger auxiliary actions
6139 as it can when the corresponding attribute in the
6140 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6142 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6143 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6144 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6145 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6146 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6147 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6148 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6149 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6150 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6151 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6152 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6153 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6154 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6155 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6156 module_param_string(inq_product, sdebug_inq_product_id,
6157 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6158 module_param_string(inq_rev, sdebug_inq_product_rev,
6159 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6160 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6161 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6162 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6163 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6164 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6165 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6166 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6167 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6168 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6169 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6170 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6172 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6174 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6175 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6176 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6177 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6178 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6179 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6180 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6181 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6182 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6183 module_param_named(per_host_store, sdebug_per_host_store, bool,
6185 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6186 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6187 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6188 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6189 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6190 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6191 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6192 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6193 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6194 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6195 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6196 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6197 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6198 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6199 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6200 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6201 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6202 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6204 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6205 module_param_named(write_same_length, sdebug_write_same_length, int,
6207 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6208 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6209 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6210 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6211 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6212 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6214 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6215 MODULE_DESCRIPTION("SCSI debug adapter driver");
6216 MODULE_LICENSE("GPL");
6217 MODULE_VERSION(SDEBUG_VERSION);
6219 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6220 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6221 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6222 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6223 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6224 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6225 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6226 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6227 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6228 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6229 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6230 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6231 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6232 MODULE_PARM_DESC(host_max_queue,
6233 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6234 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6235 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6236 SDEBUG_VERSION "\")");
6237 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6238 MODULE_PARM_DESC(lbprz,
6239 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6240 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6241 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6242 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6243 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6244 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
6245 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6246 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6247 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
6248 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6249 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6250 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6251 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6252 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
6253 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6254 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6255 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6256 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6257 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6258 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6259 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6260 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
6261 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6262 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6263 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6264 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6265 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6266 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6267 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6268 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6269 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6270 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6271 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6272 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6273 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6274 MODULE_PARM_DESC(uuid_ctl,
6275 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6276 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6277 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6278 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6279 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6280 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6281 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6282 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6283 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6284 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6285 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
6287 #define SDEBUG_INFO_LEN 256
6288 static char sdebug_info[SDEBUG_INFO_LEN];
6290 static const char *scsi_debug_info(struct Scsi_Host *shp)
6294 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6295 my_name, SDEBUG_VERSION, sdebug_version_date);
6296 if (k >= (SDEBUG_INFO_LEN - 1))
6298 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6299 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6300 sdebug_dev_size_mb, sdebug_opts, submit_queues,
6301 "statistics", (int)sdebug_statistics);
6305 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6306 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6311 int minLen = length > 15 ? 15 : length;
6313 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6315 memcpy(arr, buffer, minLen);
6317 if (1 != sscanf(arr, "%d", &opts))
6320 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6321 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6322 if (sdebug_every_nth != 0)
6327 struct sdebug_submit_queue_data {
6333 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6335 struct sdebug_submit_queue_data *data = opaque;
6336 u32 unique_tag = blk_mq_unique_tag(rq);
6337 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6338 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6339 int queue_num = data->queue_num;
6341 if (hwq != queue_num)
6344 /* Rely on iter'ing in ascending tag order */
6345 if (*data->first == -1)
6346 *data->first = *data->last = tag;
6353 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6354 * same for each scsi_debug host (if more than one). Some of the counters
6355 * output are not atomics so might be inaccurate in a busy system. */
6356 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6358 struct sdebug_host_info *sdhp;
6361 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6362 SDEBUG_VERSION, sdebug_version_date);
6363 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6364 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6365 sdebug_opts, sdebug_every_nth);
6366 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6367 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6368 sdebug_sector_size, "bytes");
6369 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6370 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6372 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6373 num_dev_resets, num_target_resets, num_bus_resets,
6375 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6376 dix_reads, dix_writes, dif_errors);
6377 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6379 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6380 atomic_read(&sdebug_cmnd_count),
6381 atomic_read(&sdebug_completions),
6382 "miss_cpus", atomic_read(&sdebug_miss_cpus),
6383 atomic_read(&sdebug_a_tsf),
6384 atomic_read(&sdeb_mq_poll_count));
6386 seq_printf(m, "submit_queues=%d\n", submit_queues);
6387 for (j = 0; j < submit_queues; ++j) {
6389 struct sdebug_submit_queue_data data = {
6394 seq_printf(m, " queue %d:\n", j);
6395 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6398 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
6399 "first,last bits", f, l);
6403 seq_printf(m, "this host_no=%d\n", host->host_no);
6404 if (!xa_empty(per_store_ap)) {
6407 unsigned long l_idx;
6408 struct sdeb_store_info *sip;
6410 seq_puts(m, "\nhost list:\n");
6412 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6414 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
6415 sdhp->shost->host_no, idx);
6418 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6419 sdeb_most_recent_idx);
6421 xa_for_each(per_store_ap, l_idx, sip) {
6422 niu = xa_get_mark(per_store_ap, l_idx,
6423 SDEB_XA_NOT_IN_USE);
6425 seq_printf(m, " %d: idx=%d%s\n", j, idx,
6426 (niu ? " not_in_use" : ""));
6433 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6435 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6437 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6438 * of delay is jiffies.
6440 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6445 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6447 if (sdebug_jdelay != jdelay) {
6448 struct sdebug_host_info *sdhp;
6450 mutex_lock(&sdebug_host_list_mutex);
6451 block_unblock_all_queues(true);
6453 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6454 struct Scsi_Host *shost = sdhp->shost;
6456 if (scsi_host_busy(shost)) {
6457 res = -EBUSY; /* queued commands */
6462 sdebug_jdelay = jdelay;
6465 block_unblock_all_queues(false);
6466 mutex_unlock(&sdebug_host_list_mutex);
6472 static DRIVER_ATTR_RW(delay);
6474 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6476 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6478 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6479 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6480 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6485 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6486 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6488 if (sdebug_ndelay != ndelay) {
6489 struct sdebug_host_info *sdhp;
6491 mutex_lock(&sdebug_host_list_mutex);
6492 block_unblock_all_queues(true);
6494 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6495 struct Scsi_Host *shost = sdhp->shost;
6497 if (scsi_host_busy(shost)) {
6498 res = -EBUSY; /* queued commands */
6504 sdebug_ndelay = ndelay;
6505 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6508 block_unblock_all_queues(false);
6509 mutex_unlock(&sdebug_host_list_mutex);
6515 static DRIVER_ATTR_RW(ndelay);
6517 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6519 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6522 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6528 if (sscanf(buf, "%10s", work) == 1) {
6529 if (strncasecmp(work, "0x", 2) == 0) {
6530 if (kstrtoint(work + 2, 16, &opts) == 0)
6533 if (kstrtoint(work, 10, &opts) == 0)
6540 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6541 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6545 static DRIVER_ATTR_RW(opts);
6547 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6549 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6551 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6556 /* Cannot change from or to TYPE_ZBC with sysfs */
6557 if (sdebug_ptype == TYPE_ZBC)
6560 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6568 static DRIVER_ATTR_RW(ptype);
6570 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6572 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6574 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6579 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6585 static DRIVER_ATTR_RW(dsense);
6587 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6589 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6591 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6596 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6597 bool want_store = (n == 0);
6598 struct sdebug_host_info *sdhp;
6601 sdebug_fake_rw = (sdebug_fake_rw > 0);
6602 if (sdebug_fake_rw == n)
6603 return count; /* not transitioning so do nothing */
6605 if (want_store) { /* 1 --> 0 transition, set up store */
6606 if (sdeb_first_idx < 0) {
6607 idx = sdebug_add_store();
6611 idx = sdeb_first_idx;
6612 xa_clear_mark(per_store_ap, idx,
6613 SDEB_XA_NOT_IN_USE);
6615 /* make all hosts use same store */
6616 list_for_each_entry(sdhp, &sdebug_host_list,
6618 if (sdhp->si_idx != idx) {
6619 xa_set_mark(per_store_ap, sdhp->si_idx,
6620 SDEB_XA_NOT_IN_USE);
6624 sdeb_most_recent_idx = idx;
6625 } else { /* 0 --> 1 transition is trigger for shrink */
6626 sdebug_erase_all_stores(true /* apart from first */);
6633 static DRIVER_ATTR_RW(fake_rw);
6635 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6637 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6639 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6644 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6645 sdebug_no_lun_0 = n;
6650 static DRIVER_ATTR_RW(no_lun_0);
6652 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6654 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6656 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6661 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6662 sdebug_num_tgts = n;
6663 sdebug_max_tgts_luns();
6668 static DRIVER_ATTR_RW(num_tgts);
6670 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6672 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6674 static DRIVER_ATTR_RO(dev_size_mb);
6676 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6678 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6681 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6686 if (kstrtobool(buf, &v))
6689 sdebug_per_host_store = v;
6692 static DRIVER_ATTR_RW(per_host_store);
6694 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6696 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6698 static DRIVER_ATTR_RO(num_parts);
6700 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6702 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6704 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6710 if (sscanf(buf, "%10s", work) == 1) {
6711 if (strncasecmp(work, "0x", 2) == 0) {
6712 if (kstrtoint(work + 2, 16, &nth) == 0)
6713 goto every_nth_done;
6715 if (kstrtoint(work, 10, &nth) == 0)
6716 goto every_nth_done;
6722 sdebug_every_nth = nth;
6723 if (nth && !sdebug_statistics) {
6724 pr_info("every_nth needs statistics=1, set it\n");
6725 sdebug_statistics = true;
6730 static DRIVER_ATTR_RW(every_nth);
6732 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6734 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6736 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6742 if (kstrtoint(buf, 0, &n))
6745 if (n > (int)SAM_LUN_AM_FLAT) {
6746 pr_warn("only LUN address methods 0 and 1 are supported\n");
6749 changed = ((int)sdebug_lun_am != n);
6751 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6752 struct sdebug_host_info *sdhp;
6753 struct sdebug_dev_info *dp;
6755 mutex_lock(&sdebug_host_list_mutex);
6756 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6757 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6758 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6761 mutex_unlock(&sdebug_host_list_mutex);
6767 static DRIVER_ATTR_RW(lun_format);
6769 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6771 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6773 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6779 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6781 pr_warn("max_luns can be no more than 256\n");
6784 changed = (sdebug_max_luns != n);
6785 sdebug_max_luns = n;
6786 sdebug_max_tgts_luns();
6787 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6788 struct sdebug_host_info *sdhp;
6789 struct sdebug_dev_info *dp;
6791 mutex_lock(&sdebug_host_list_mutex);
6792 list_for_each_entry(sdhp, &sdebug_host_list,
6794 list_for_each_entry(dp, &sdhp->dev_info_list,
6796 set_bit(SDEBUG_UA_LUNS_CHANGED,
6800 mutex_unlock(&sdebug_host_list_mutex);
6806 static DRIVER_ATTR_RW(max_luns);
6808 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6810 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6812 /* N.B. max_queue can be changed while there are queued commands. In flight
6813 * commands beyond the new max_queue will be completed. */
6814 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6819 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6820 (n <= SDEBUG_CANQUEUE) &&
6821 (sdebug_host_max_queue == 0)) {
6822 mutex_lock(&sdebug_host_list_mutex);
6824 /* We may only change sdebug_max_queue when we have no shosts */
6825 if (list_empty(&sdebug_host_list))
6826 sdebug_max_queue = n;
6829 mutex_unlock(&sdebug_host_list_mutex);
6834 static DRIVER_ATTR_RW(max_queue);
6836 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6838 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6841 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6843 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6846 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6850 if (kstrtobool(buf, &v))
6853 sdebug_no_rwlock = v;
6856 static DRIVER_ATTR_RW(no_rwlock);
6859 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6860 * in range [0, sdebug_host_max_queue), we can't change it.
6862 static DRIVER_ATTR_RO(host_max_queue);
6864 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6866 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6868 static DRIVER_ATTR_RO(no_uld);
6870 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6872 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6874 static DRIVER_ATTR_RO(scsi_level);
6876 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6878 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6880 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6886 /* Ignore capacity change for ZBC drives for now */
6887 if (sdeb_zbc_in_use)
6890 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6891 changed = (sdebug_virtual_gb != n);
6892 sdebug_virtual_gb = n;
6893 sdebug_capacity = get_sdebug_capacity();
6895 struct sdebug_host_info *sdhp;
6896 struct sdebug_dev_info *dp;
6898 mutex_lock(&sdebug_host_list_mutex);
6899 list_for_each_entry(sdhp, &sdebug_host_list,
6901 list_for_each_entry(dp, &sdhp->dev_info_list,
6903 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6907 mutex_unlock(&sdebug_host_list_mutex);
6913 static DRIVER_ATTR_RW(virtual_gb);
6915 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6917 /* absolute number of hosts currently active is what is shown */
6918 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6921 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6926 struct sdeb_store_info *sip;
6927 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6930 if (sscanf(buf, "%d", &delta_hosts) != 1)
6932 if (delta_hosts > 0) {
6936 xa_for_each_marked(per_store_ap, idx, sip,
6937 SDEB_XA_NOT_IN_USE) {
6938 sdeb_most_recent_idx = (int)idx;
6942 if (found) /* re-use case */
6943 sdebug_add_host_helper((int)idx);
6945 sdebug_do_add_host(true);
6947 sdebug_do_add_host(false);
6949 } while (--delta_hosts);
6950 } else if (delta_hosts < 0) {
6952 sdebug_do_remove_host(false);
6953 } while (++delta_hosts);
6957 static DRIVER_ATTR_RW(add_host);
6959 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6961 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6963 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6968 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6969 sdebug_vpd_use_hostno = n;
6974 static DRIVER_ATTR_RW(vpd_use_hostno);
6976 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6978 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6980 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6985 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6987 sdebug_statistics = true;
6989 clear_queue_stats();
6990 sdebug_statistics = false;
6996 static DRIVER_ATTR_RW(statistics);
6998 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7000 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7002 static DRIVER_ATTR_RO(sector_size);
7004 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7006 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7008 static DRIVER_ATTR_RO(submit_queues);
7010 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7012 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7014 static DRIVER_ATTR_RO(dix);
7016 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7018 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7020 static DRIVER_ATTR_RO(dif);
7022 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7024 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7026 static DRIVER_ATTR_RO(guard);
7028 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7030 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7032 static DRIVER_ATTR_RO(ato);
7034 static ssize_t map_show(struct device_driver *ddp, char *buf)
7038 if (!scsi_debug_lbp())
7039 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7040 sdebug_store_sectors);
7042 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7043 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7046 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7047 (int)map_size, sip->map_storep);
7049 buf[count++] = '\n';
7054 static DRIVER_ATTR_RO(map);
7056 static ssize_t random_show(struct device_driver *ddp, char *buf)
7058 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7061 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7066 if (kstrtobool(buf, &v))
7072 static DRIVER_ATTR_RW(random);
7074 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7076 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7078 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7083 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7084 sdebug_removable = (n > 0);
7089 static DRIVER_ATTR_RW(removable);
7091 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7093 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7095 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7096 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7101 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7102 sdebug_host_lock = (n > 0);
7107 static DRIVER_ATTR_RW(host_lock);
7109 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7111 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7113 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7118 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7119 sdebug_strict = (n > 0);
7124 static DRIVER_ATTR_RW(strict);
7126 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7128 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7130 static DRIVER_ATTR_RO(uuid_ctl);
7132 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7134 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7136 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7141 ret = kstrtoint(buf, 0, &n);
7145 all_config_cdb_len();
7148 static DRIVER_ATTR_RW(cdb_len);
7150 static const char * const zbc_model_strs_a[] = {
7151 [BLK_ZONED_NONE] = "none",
7152 [BLK_ZONED_HA] = "host-aware",
7153 [BLK_ZONED_HM] = "host-managed",
7156 static const char * const zbc_model_strs_b[] = {
7157 [BLK_ZONED_NONE] = "no",
7158 [BLK_ZONED_HA] = "aware",
7159 [BLK_ZONED_HM] = "managed",
7162 static const char * const zbc_model_strs_c[] = {
7163 [BLK_ZONED_NONE] = "0",
7164 [BLK_ZONED_HA] = "1",
7165 [BLK_ZONED_HM] = "2",
7168 static int sdeb_zbc_model_str(const char *cp)
7170 int res = sysfs_match_string(zbc_model_strs_a, cp);
7173 res = sysfs_match_string(zbc_model_strs_b, cp);
7175 res = sysfs_match_string(zbc_model_strs_c, cp);
7183 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7185 return scnprintf(buf, PAGE_SIZE, "%s\n",
7186 zbc_model_strs_a[sdeb_zbc_model]);
7188 static DRIVER_ATTR_RO(zbc);
7190 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7192 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7194 static DRIVER_ATTR_RO(tur_ms_to_ready);
7196 /* Note: The following array creates attribute files in the
7197 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7198 files (over those found in the /sys/module/scsi_debug/parameters
7199 directory) is that auxiliary actions can be triggered when an attribute
7200 is changed. For example see: add_host_store() above.
7203 static struct attribute *sdebug_drv_attrs[] = {
7204 &driver_attr_delay.attr,
7205 &driver_attr_opts.attr,
7206 &driver_attr_ptype.attr,
7207 &driver_attr_dsense.attr,
7208 &driver_attr_fake_rw.attr,
7209 &driver_attr_host_max_queue.attr,
7210 &driver_attr_no_lun_0.attr,
7211 &driver_attr_num_tgts.attr,
7212 &driver_attr_dev_size_mb.attr,
7213 &driver_attr_num_parts.attr,
7214 &driver_attr_every_nth.attr,
7215 &driver_attr_lun_format.attr,
7216 &driver_attr_max_luns.attr,
7217 &driver_attr_max_queue.attr,
7218 &driver_attr_no_rwlock.attr,
7219 &driver_attr_no_uld.attr,
7220 &driver_attr_scsi_level.attr,
7221 &driver_attr_virtual_gb.attr,
7222 &driver_attr_add_host.attr,
7223 &driver_attr_per_host_store.attr,
7224 &driver_attr_vpd_use_hostno.attr,
7225 &driver_attr_sector_size.attr,
7226 &driver_attr_statistics.attr,
7227 &driver_attr_submit_queues.attr,
7228 &driver_attr_dix.attr,
7229 &driver_attr_dif.attr,
7230 &driver_attr_guard.attr,
7231 &driver_attr_ato.attr,
7232 &driver_attr_map.attr,
7233 &driver_attr_random.attr,
7234 &driver_attr_removable.attr,
7235 &driver_attr_host_lock.attr,
7236 &driver_attr_ndelay.attr,
7237 &driver_attr_strict.attr,
7238 &driver_attr_uuid_ctl.attr,
7239 &driver_attr_cdb_len.attr,
7240 &driver_attr_tur_ms_to_ready.attr,
7241 &driver_attr_zbc.attr,
7244 ATTRIBUTE_GROUPS(sdebug_drv);
7246 static struct device *pseudo_primary;
7248 static int __init scsi_debug_init(void)
7250 bool want_store = (sdebug_fake_rw == 0);
7252 int k, ret, hosts_to_add;
7255 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7256 pr_warn("ndelay must be less than 1 second, ignored\n");
7258 } else if (sdebug_ndelay > 0)
7259 sdebug_jdelay = JDELAY_OVERRIDDEN;
7261 switch (sdebug_sector_size) {
7268 pr_err("invalid sector_size %d\n", sdebug_sector_size);
7272 switch (sdebug_dif) {
7273 case T10_PI_TYPE0_PROTECTION:
7275 case T10_PI_TYPE1_PROTECTION:
7276 case T10_PI_TYPE2_PROTECTION:
7277 case T10_PI_TYPE3_PROTECTION:
7278 have_dif_prot = true;
7282 pr_err("dif must be 0, 1, 2 or 3\n");
7286 if (sdebug_num_tgts < 0) {
7287 pr_err("num_tgts must be >= 0\n");
7291 if (sdebug_guard > 1) {
7292 pr_err("guard must be 0 or 1\n");
7296 if (sdebug_ato > 1) {
7297 pr_err("ato must be 0 or 1\n");
7301 if (sdebug_physblk_exp > 15) {
7302 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7306 sdebug_lun_am = sdebug_lun_am_i;
7307 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7308 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7309 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7312 if (sdebug_max_luns > 256) {
7313 if (sdebug_max_luns > 16384) {
7314 pr_warn("max_luns can be no more than 16384, use default\n");
7315 sdebug_max_luns = DEF_MAX_LUNS;
7317 sdebug_lun_am = SAM_LUN_AM_FLAT;
7320 if (sdebug_lowest_aligned > 0x3fff) {
7321 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7325 if (submit_queues < 1) {
7326 pr_err("submit_queues must be 1 or more\n");
7330 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7331 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7335 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7336 (sdebug_host_max_queue < 0)) {
7337 pr_err("host_max_queue must be in range [0 %d]\n",
7342 if (sdebug_host_max_queue &&
7343 (sdebug_max_queue != sdebug_host_max_queue)) {
7344 sdebug_max_queue = sdebug_host_max_queue;
7345 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7350 * check for host managed zoned block device specified with
7351 * ptype=0x14 or zbc=XXX.
7353 if (sdebug_ptype == TYPE_ZBC) {
7354 sdeb_zbc_model = BLK_ZONED_HM;
7355 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7356 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7360 switch (sdeb_zbc_model) {
7361 case BLK_ZONED_NONE:
7363 sdebug_ptype = TYPE_DISK;
7366 sdebug_ptype = TYPE_ZBC;
7369 pr_err("Invalid ZBC model\n");
7373 if (sdeb_zbc_model != BLK_ZONED_NONE) {
7374 sdeb_zbc_in_use = true;
7375 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7376 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7379 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7380 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7381 if (sdebug_dev_size_mb < 1)
7382 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
7383 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7384 sdebug_store_sectors = sz / sdebug_sector_size;
7385 sdebug_capacity = get_sdebug_capacity();
7387 /* play around with geometry, don't waste too much on track 0 */
7389 sdebug_sectors_per = 32;
7390 if (sdebug_dev_size_mb >= 256)
7392 else if (sdebug_dev_size_mb >= 16)
7394 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7395 (sdebug_sectors_per * sdebug_heads);
7396 if (sdebug_cylinders_per >= 1024) {
7397 /* other LLDs do this; implies >= 1GB ram disk ... */
7399 sdebug_sectors_per = 63;
7400 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7401 (sdebug_sectors_per * sdebug_heads);
7403 if (scsi_debug_lbp()) {
7404 sdebug_unmap_max_blocks =
7405 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7407 sdebug_unmap_max_desc =
7408 clamp(sdebug_unmap_max_desc, 0U, 256U);
7410 sdebug_unmap_granularity =
7411 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7413 if (sdebug_unmap_alignment &&
7414 sdebug_unmap_granularity <=
7415 sdebug_unmap_alignment) {
7416 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7420 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7422 idx = sdebug_add_store();
7427 pseudo_primary = root_device_register("pseudo_0");
7428 if (IS_ERR(pseudo_primary)) {
7429 pr_warn("root_device_register() error\n");
7430 ret = PTR_ERR(pseudo_primary);
7433 ret = bus_register(&pseudo_lld_bus);
7435 pr_warn("bus_register error: %d\n", ret);
7438 ret = driver_register(&sdebug_driverfs_driver);
7440 pr_warn("driver_register error: %d\n", ret);
7444 hosts_to_add = sdebug_add_host;
7445 sdebug_add_host = 0;
7447 queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7448 if (!queued_cmd_cache) {
7453 sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7454 if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7455 pr_info("%s: failed to create initial debugfs directory\n", __func__);
7457 for (k = 0; k < hosts_to_add; k++) {
7458 if (want_store && k == 0) {
7459 ret = sdebug_add_host_helper(idx);
7461 pr_err("add_host_helper k=%d, error=%d\n",
7466 ret = sdebug_do_add_host(want_store &&
7467 sdebug_per_host_store);
7469 pr_err("add_host k=%d error=%d\n", k, -ret);
7475 pr_info("built %d host(s)\n", sdebug_num_hosts);
7480 driver_unregister(&sdebug_driverfs_driver);
7482 bus_unregister(&pseudo_lld_bus);
7484 root_device_unregister(pseudo_primary);
7486 sdebug_erase_store(idx, NULL);
7490 static void __exit scsi_debug_exit(void)
7492 int k = sdebug_num_hosts;
7495 sdebug_do_remove_host(true);
7496 kmem_cache_destroy(queued_cmd_cache);
7497 driver_unregister(&sdebug_driverfs_driver);
7498 bus_unregister(&pseudo_lld_bus);
7499 root_device_unregister(pseudo_primary);
7501 sdebug_erase_all_stores(false);
7502 xa_destroy(per_store_ap);
7503 debugfs_remove(sdebug_debugfs_root);
7506 device_initcall(scsi_debug_init);
7507 module_exit(scsi_debug_exit);
7509 static void sdebug_release_adapter(struct device *dev)
7511 struct sdebug_host_info *sdbg_host;
7513 sdbg_host = dev_to_sdebug_host(dev);
7517 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7518 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7523 if (xa_empty(per_store_ap))
7525 sip = xa_load(per_store_ap, idx);
7529 vfree(sip->map_storep);
7530 vfree(sip->dif_storep);
7532 xa_erase(per_store_ap, idx);
7536 /* Assume apart_from_first==false only in shutdown case. */
7537 static void sdebug_erase_all_stores(bool apart_from_first)
7540 struct sdeb_store_info *sip = NULL;
7542 xa_for_each(per_store_ap, idx, sip) {
7543 if (apart_from_first)
7544 apart_from_first = false;
7546 sdebug_erase_store(idx, sip);
7548 if (apart_from_first)
7549 sdeb_most_recent_idx = sdeb_first_idx;
7553 * Returns store xarray new element index (idx) if >=0 else negated errno.
7554 * Limit the number of stores to 65536.
7556 static int sdebug_add_store(void)
7560 unsigned long iflags;
7561 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7562 struct sdeb_store_info *sip = NULL;
7563 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7565 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7569 xa_lock_irqsave(per_store_ap, iflags);
7570 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7571 if (unlikely(res < 0)) {
7572 xa_unlock_irqrestore(per_store_ap, iflags);
7574 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7577 sdeb_most_recent_idx = n_idx;
7578 if (sdeb_first_idx < 0)
7579 sdeb_first_idx = n_idx;
7580 xa_unlock_irqrestore(per_store_ap, iflags);
7583 sip->storep = vzalloc(sz);
7585 pr_err("user data oom\n");
7588 if (sdebug_num_parts > 0)
7589 sdebug_build_parts(sip->storep, sz);
7591 /* DIF/DIX: what T10 calls Protection Information (PI) */
7595 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7596 sip->dif_storep = vmalloc(dif_size);
7598 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7601 if (!sip->dif_storep) {
7602 pr_err("DIX oom\n");
7605 memset(sip->dif_storep, 0xff, dif_size);
7607 /* Logical Block Provisioning */
7608 if (scsi_debug_lbp()) {
7609 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7610 sip->map_storep = vmalloc(array_size(sizeof(long),
7611 BITS_TO_LONGS(map_size)));
7613 pr_info("%lu provisioning blocks\n", map_size);
7615 if (!sip->map_storep) {
7616 pr_err("LBP map oom\n");
7620 bitmap_zero(sip->map_storep, map_size);
7622 /* Map first 1KB for partition table */
7623 if (sdebug_num_parts)
7624 map_region(sip, 0, 2);
7627 rwlock_init(&sip->macc_lck);
7630 sdebug_erase_store((int)n_idx, sip);
7631 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7635 static int sdebug_add_host_helper(int per_host_idx)
7637 int k, devs_per_host, idx;
7638 int error = -ENOMEM;
7639 struct sdebug_host_info *sdbg_host;
7640 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7642 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7645 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7646 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7647 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7648 sdbg_host->si_idx = idx;
7650 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7652 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7653 for (k = 0; k < devs_per_host; k++) {
7654 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7659 mutex_lock(&sdebug_host_list_mutex);
7660 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7661 mutex_unlock(&sdebug_host_list_mutex);
7663 sdbg_host->dev.bus = &pseudo_lld_bus;
7664 sdbg_host->dev.parent = pseudo_primary;
7665 sdbg_host->dev.release = &sdebug_release_adapter;
7666 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7668 error = device_register(&sdbg_host->dev);
7670 mutex_lock(&sdebug_host_list_mutex);
7671 list_del(&sdbg_host->host_list);
7672 mutex_unlock(&sdebug_host_list_mutex);
7680 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7682 list_del(&sdbg_devinfo->dev_list);
7683 kfree(sdbg_devinfo->zstate);
7684 kfree(sdbg_devinfo);
7686 if (sdbg_host->dev.release)
7687 put_device(&sdbg_host->dev);
7690 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7694 static int sdebug_do_add_host(bool mk_new_store)
7696 int ph_idx = sdeb_most_recent_idx;
7699 ph_idx = sdebug_add_store();
7703 return sdebug_add_host_helper(ph_idx);
7706 static void sdebug_do_remove_host(bool the_end)
7709 struct sdebug_host_info *sdbg_host = NULL;
7710 struct sdebug_host_info *sdbg_host2;
7712 mutex_lock(&sdebug_host_list_mutex);
7713 if (!list_empty(&sdebug_host_list)) {
7714 sdbg_host = list_entry(sdebug_host_list.prev,
7715 struct sdebug_host_info, host_list);
7716 idx = sdbg_host->si_idx;
7718 if (!the_end && idx >= 0) {
7721 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7722 if (sdbg_host2 == sdbg_host)
7724 if (idx == sdbg_host2->si_idx) {
7730 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7731 if (idx == sdeb_most_recent_idx)
7732 --sdeb_most_recent_idx;
7736 list_del(&sdbg_host->host_list);
7737 mutex_unlock(&sdebug_host_list_mutex);
7742 device_unregister(&sdbg_host->dev);
7746 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7748 struct sdebug_dev_info *devip = sdev->hostdata;
7753 mutex_lock(&sdebug_host_list_mutex);
7754 block_unblock_all_queues(true);
7756 if (qdepth > SDEBUG_CANQUEUE) {
7757 qdepth = SDEBUG_CANQUEUE;
7758 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7759 qdepth, SDEBUG_CANQUEUE);
7763 if (qdepth != sdev->queue_depth)
7764 scsi_change_queue_depth(sdev, qdepth);
7766 block_unblock_all_queues(false);
7767 mutex_unlock(&sdebug_host_list_mutex);
7769 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7770 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7772 return sdev->queue_depth;
7775 static bool fake_timeout(struct scsi_cmnd *scp)
7777 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7778 if (sdebug_every_nth < -1)
7779 sdebug_every_nth = -1;
7780 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7781 return true; /* ignore command causing timeout */
7782 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7783 scsi_medium_access_command(scp))
7784 return true; /* time out reads and writes */
7789 /* Response to TUR or media access command when device stopped */
7790 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7794 ktime_t now_ts = ktime_get_boottime();
7795 struct scsi_device *sdp = scp->device;
7797 stopped_state = atomic_read(&devip->stopped);
7798 if (stopped_state == 2) {
7799 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7800 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7801 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7802 /* tur_ms_to_ready timer extinguished */
7803 atomic_set(&devip->stopped, 0);
7807 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7809 sdev_printk(KERN_INFO, sdp,
7810 "%s: Not ready: in process of becoming ready\n", my_name);
7811 if (scp->cmnd[0] == TEST_UNIT_READY) {
7812 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7814 if (diff_ns <= tur_nanosecs_to_ready)
7815 diff_ns = tur_nanosecs_to_ready - diff_ns;
7817 diff_ns = tur_nanosecs_to_ready;
7818 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7819 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7820 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7822 return check_condition_result;
7825 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7827 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7829 return check_condition_result;
7832 static void sdebug_map_queues(struct Scsi_Host *shost)
7836 if (shost->nr_hw_queues == 1)
7839 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7840 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7844 if (i == HCTX_TYPE_DEFAULT)
7845 map->nr_queues = submit_queues - poll_queues;
7846 else if (i == HCTX_TYPE_POLL)
7847 map->nr_queues = poll_queues;
7849 if (!map->nr_queues) {
7850 BUG_ON(i == HCTX_TYPE_DEFAULT);
7854 map->queue_offset = qoff;
7855 blk_mq_map_queues(map);
7857 qoff += map->nr_queues;
7861 struct sdebug_blk_mq_poll_data {
7862 unsigned int queue_num;
7867 * We don't handle aborted commands here, but it does not seem possible to have
7868 * aborted polled commands from schedule_resp()
7870 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7872 struct sdebug_blk_mq_poll_data *data = opaque;
7873 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7874 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7875 struct sdebug_defer *sd_dp;
7876 u32 unique_tag = blk_mq_unique_tag(rq);
7877 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7878 struct sdebug_queued_cmd *sqcp;
7879 unsigned long flags;
7880 int queue_num = data->queue_num;
7883 /* We're only interested in one queue for this iteration */
7884 if (hwq != queue_num)
7887 /* Subsequent checks would fail if this failed, but check anyway */
7888 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7891 time = ktime_get_boottime();
7893 spin_lock_irqsave(&sdsc->lock, flags);
7894 sqcp = TO_QUEUED_CMD(cmd);
7896 spin_unlock_irqrestore(&sdsc->lock, flags);
7900 sd_dp = &sqcp->sd_dp;
7901 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7902 spin_unlock_irqrestore(&sdsc->lock, flags);
7906 if (time < sd_dp->cmpl_ts) {
7907 spin_unlock_irqrestore(&sdsc->lock, flags);
7911 ASSIGN_QUEUED_CMD(cmd, NULL);
7912 spin_unlock_irqrestore(&sdsc->lock, flags);
7914 if (sdebug_statistics) {
7915 atomic_inc(&sdebug_completions);
7916 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7917 atomic_inc(&sdebug_miss_cpus);
7920 sdebug_free_queued_cmd(sqcp);
7922 scsi_done(cmd); /* callback to mid level */
7923 (*data->num_entries)++;
7927 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7929 int num_entries = 0;
7930 struct sdebug_blk_mq_poll_data data = {
7931 .queue_num = queue_num,
7932 .num_entries = &num_entries,
7935 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7938 if (num_entries > 0)
7939 atomic_add(num_entries, &sdeb_mq_poll_count);
7943 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
7945 struct scsi_device *sdp = cmnd->device;
7946 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7947 struct sdebug_err_inject *err;
7948 unsigned char *cmd = cmnd->cmnd;
7955 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7956 if (err->type == ERR_TMOUT_CMD &&
7957 (err->cmd == cmd[0] || err->cmd == 0xff)) {
7971 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
7973 struct scsi_device *sdp = cmnd->device;
7974 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7975 struct sdebug_err_inject *err;
7976 unsigned char *cmd = cmnd->cmnd;
7983 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7984 if (err->type == ERR_FAIL_QUEUE_CMD &&
7985 (err->cmd == cmd[0] || err->cmd == 0xff)) {
7986 ret = err->cnt ? err->queuecmd_ret : 0;
7999 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
8000 struct sdebug_err_inject *info)
8002 struct scsi_device *sdp = cmnd->device;
8003 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8004 struct sdebug_err_inject *err;
8005 unsigned char *cmd = cmnd->cmnd;
8013 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8014 if (err->type == ERR_FAIL_CMD &&
8015 (err->cmd == cmd[0] || err->cmd == 0xff)) {
8033 mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8034 result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8036 *retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8041 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8042 struct scsi_cmnd *scp)
8045 struct scsi_device *sdp = scp->device;
8046 const struct opcode_info_t *oip;
8047 const struct opcode_info_t *r_oip;
8048 struct sdebug_dev_info *devip;
8049 u8 *cmd = scp->cmnd;
8050 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8051 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8054 u64 lun_index = sdp->lun & 0x3FFF;
8061 struct sdebug_err_inject err;
8063 scsi_set_resid(scp, 0);
8064 if (sdebug_statistics) {
8065 atomic_inc(&sdebug_cmnd_count);
8066 inject_now = inject_on_this_cmd();
8070 if (unlikely(sdebug_verbose &&
8071 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8076 sb = (int)sizeof(b);
8078 strcpy(b, "too long, over 32 bytes");
8080 for (k = 0, n = 0; k < len && n < sb; ++k)
8081 n += scnprintf(b + n, sb - n, "%02x ",
8084 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8085 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8087 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8088 return SCSI_MLQUEUE_HOST_BUSY;
8089 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8090 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8093 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
8094 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
8095 devip = (struct sdebug_dev_info *)sdp->hostdata;
8096 if (unlikely(!devip)) {
8097 devip = find_build_dev_info(sdp);
8102 if (sdebug_timeout_cmd(scp)) {
8103 scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8107 ret = sdebug_fail_queue_cmd(scp);
8109 scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8114 if (sdebug_fail_cmd(scp, &ret, &err)) {
8115 scmd_printk(KERN_INFO, scp,
8116 "fail command 0x%x with hostbyte=0x%x, "
8117 "driverbyte=0x%x, statusbyte=0x%x, "
8118 "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8119 opcode, err.host_byte, err.driver_byte,
8120 err.status_byte, err.sense_key, err.asc, err.asq);
8124 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8125 atomic_set(&sdeb_inject_pending, 1);
8127 na = oip->num_attached;
8129 if (na) { /* multiple commands with this opcode */
8131 if (FF_SA & r_oip->flags) {
8132 if (F_SA_LOW & oip->flags)
8135 sa = get_unaligned_be16(cmd + 8);
8136 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8137 if (opcode == oip->opcode && sa == oip->sa)
8140 } else { /* since no service action only check opcode */
8141 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8142 if (opcode == oip->opcode)
8147 if (F_SA_LOW & r_oip->flags)
8148 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8149 else if (F_SA_HIGH & r_oip->flags)
8150 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8152 mk_sense_invalid_opcode(scp);
8155 } /* else (when na==0) we assume the oip is a match */
8157 if (unlikely(F_INV_OP & flags)) {
8158 mk_sense_invalid_opcode(scp);
8161 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8163 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
8164 my_name, opcode, " supported for wlun");
8165 mk_sense_invalid_opcode(scp);
8168 if (unlikely(sdebug_strict)) { /* check cdb against mask */
8172 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8173 rem = ~oip->len_mask[k] & cmd[k];
8175 for (j = 7; j >= 0; --j, rem <<= 1) {
8179 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8184 if (unlikely(!(F_SKIP_UA & flags) &&
8185 find_first_bit(devip->uas_bm,
8186 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8187 errsts = make_ua(scp, devip);
8191 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8192 atomic_read(&devip->stopped))) {
8193 errsts = resp_not_ready(scp, devip);
8197 if (sdebug_fake_rw && (F_FAKE_RW & flags))
8199 if (unlikely(sdebug_every_nth)) {
8200 if (fake_timeout(scp))
8201 return 0; /* ignore command: make trouble */
8203 if (likely(oip->pfp))
8204 pfp = oip->pfp; /* calls a resp_* function */
8206 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
8209 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
8210 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8211 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8212 sdebug_ndelay > 10000)) {
8214 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8215 * for Start Stop Unit (SSU) want at least 1 second delay and
8216 * if sdebug_jdelay>1 want a long delay of that many seconds.
8217 * For Synchronize Cache want 1/20 of SSU's delay.
8219 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8220 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8222 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8223 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8225 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8228 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8230 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8233 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8235 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8237 spin_lock_init(&sdsc->lock);
8242 static struct scsi_host_template sdebug_driver_template = {
8243 .show_info = scsi_debug_show_info,
8244 .write_info = scsi_debug_write_info,
8245 .proc_name = sdebug_proc_name,
8246 .name = "SCSI DEBUG",
8247 .info = scsi_debug_info,
8248 .slave_alloc = scsi_debug_slave_alloc,
8249 .slave_configure = scsi_debug_slave_configure,
8250 .slave_destroy = scsi_debug_slave_destroy,
8251 .ioctl = scsi_debug_ioctl,
8252 .queuecommand = scsi_debug_queuecommand,
8253 .change_queue_depth = sdebug_change_qdepth,
8254 .map_queues = sdebug_map_queues,
8255 .mq_poll = sdebug_blk_mq_poll,
8256 .eh_abort_handler = scsi_debug_abort,
8257 .eh_device_reset_handler = scsi_debug_device_reset,
8258 .eh_target_reset_handler = scsi_debug_target_reset,
8259 .eh_bus_reset_handler = scsi_debug_bus_reset,
8260 .eh_host_reset_handler = scsi_debug_host_reset,
8261 .can_queue = SDEBUG_CANQUEUE,
8263 .sg_tablesize = SG_MAX_SEGMENTS,
8264 .cmd_per_lun = DEF_CMD_PER_LUN,
8266 .max_segment_size = -1U,
8267 .module = THIS_MODULE,
8268 .track_queue_depth = 1,
8269 .cmd_size = sizeof(struct sdebug_scsi_cmd),
8270 .init_cmd_priv = sdebug_init_cmd_priv,
8271 .target_alloc = sdebug_target_alloc,
8272 .target_destroy = sdebug_target_destroy,
8275 static int sdebug_driver_probe(struct device *dev)
8278 struct sdebug_host_info *sdbg_host;
8279 struct Scsi_Host *hpnt;
8282 sdbg_host = dev_to_sdebug_host(dev);
8284 sdebug_driver_template.can_queue = sdebug_max_queue;
8285 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
8286 if (!sdebug_clustering)
8287 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
8289 hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8291 pr_err("scsi_host_alloc failed\n");
8295 if (submit_queues > nr_cpu_ids) {
8296 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8297 my_name, submit_queues, nr_cpu_ids);
8298 submit_queues = nr_cpu_ids;
8301 * Decide whether to tell scsi subsystem that we want mq. The
8302 * following should give the same answer for each host.
8304 hpnt->nr_hw_queues = submit_queues;
8305 if (sdebug_host_max_queue)
8306 hpnt->host_tagset = 1;
8308 /* poll queues are possible for nr_hw_queues > 1 */
8309 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8310 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8311 my_name, poll_queues, hpnt->nr_hw_queues);
8316 * Poll queues don't need interrupts, but we need at least one I/O queue
8317 * left over for non-polled I/O.
8318 * If condition not met, trim poll_queues to 1 (just for simplicity).
8320 if (poll_queues >= submit_queues) {
8321 if (submit_queues < 3)
8322 pr_warn("%s: trim poll_queues to 1\n", my_name);
8324 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8325 my_name, submit_queues - 1);
8331 sdbg_host->shost = hpnt;
8332 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8333 hpnt->max_id = sdebug_num_tgts + 1;
8335 hpnt->max_id = sdebug_num_tgts;
8336 /* = sdebug_max_luns; */
8337 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8341 switch (sdebug_dif) {
8343 case T10_PI_TYPE1_PROTECTION:
8344 hprot = SHOST_DIF_TYPE1_PROTECTION;
8346 hprot |= SHOST_DIX_TYPE1_PROTECTION;
8349 case T10_PI_TYPE2_PROTECTION:
8350 hprot = SHOST_DIF_TYPE2_PROTECTION;
8352 hprot |= SHOST_DIX_TYPE2_PROTECTION;
8355 case T10_PI_TYPE3_PROTECTION:
8356 hprot = SHOST_DIF_TYPE3_PROTECTION;
8358 hprot |= SHOST_DIX_TYPE3_PROTECTION;
8363 hprot |= SHOST_DIX_TYPE0_PROTECTION;
8367 scsi_host_set_prot(hpnt, hprot);
8369 if (have_dif_prot || sdebug_dix)
8370 pr_info("host protection%s%s%s%s%s%s%s\n",
8371 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8372 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8373 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8374 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8375 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8376 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8377 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8379 if (sdebug_guard == 1)
8380 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8382 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8384 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8385 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8386 if (sdebug_every_nth) /* need stats counters for every_nth */
8387 sdebug_statistics = true;
8388 error = scsi_add_host(hpnt, &sdbg_host->dev);
8390 pr_err("scsi_add_host failed\n");
8392 scsi_host_put(hpnt);
8394 scsi_scan_host(hpnt);
8400 static void sdebug_driver_remove(struct device *dev)
8402 struct sdebug_host_info *sdbg_host;
8403 struct sdebug_dev_info *sdbg_devinfo, *tmp;
8405 sdbg_host = dev_to_sdebug_host(dev);
8407 scsi_remove_host(sdbg_host->shost);
8409 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8411 list_del(&sdbg_devinfo->dev_list);
8412 kfree(sdbg_devinfo->zstate);
8413 kfree(sdbg_devinfo);
8416 scsi_host_put(sdbg_host->shost);
8419 static struct bus_type pseudo_lld_bus = {
8421 .probe = sdebug_driver_probe,
8422 .remove = sdebug_driver_remove,
8423 .drv_groups = sdebug_drv_groups,