1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
10 * Copyright (C) 2001 - 2020 Douglas Gilbert
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
60 #include "scsi_logging.h"
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190" /* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";
66 #define MY_NAME "scsi_debug"
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST 1
108 #define DEF_NUM_TGTS 1
109 #define DEF_MAX_LUNS 1
110 /* With these defaults, this driver will make 1 host with 1 target
111 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT 0
117 #define DEF_DEV_SIZE_MB 8
118 #define DEF_ZBC_DEV_SIZE_MB 128
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE 0
123 #define DEF_EVERY_NTH 0
124 #define DEF_FAKE_RW 0
126 #define DEF_HOST_LOCK 0
129 #define DEF_LBPWS10 0
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0 0
134 #define DEF_NUM_PARTS 0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB 0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_TUR_MS_TO_READY 0
155 #define DEF_UUID_CTL 0
156 #define JDELAY_OVERRIDDEN -9999
158 /* Default parameters for ZBC drives */
159 #define DEF_ZBC_ZONE_SIZE_MB 128
160 #define DEF_ZBC_MAX_OPEN_ZONES 8
161 #define DEF_ZBC_NR_CONV_ZONES 1
163 #define SDEBUG_LUN_0_VAL 0
165 /* bit mask values for sdebug_opts */
166 #define SDEBUG_OPT_NOISE 1
167 #define SDEBUG_OPT_MEDIUM_ERR 2
168 #define SDEBUG_OPT_TIMEOUT 4
169 #define SDEBUG_OPT_RECOVERED_ERR 8
170 #define SDEBUG_OPT_TRANSPORT_ERR 16
171 #define SDEBUG_OPT_DIF_ERR 32
172 #define SDEBUG_OPT_DIX_ERR 64
173 #define SDEBUG_OPT_MAC_TIMEOUT 128
174 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
175 #define SDEBUG_OPT_Q_NOISE 0x200
176 #define SDEBUG_OPT_ALL_TSF 0x400
177 #define SDEBUG_OPT_RARE_TSF 0x800
178 #define SDEBUG_OPT_N_WCE 0x1000
179 #define SDEBUG_OPT_RESET_NOISE 0x2000
180 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
181 #define SDEBUG_OPT_HOST_BUSY 0x8000
182 #define SDEBUG_OPT_CMD_ABORT 0x10000
183 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
184 SDEBUG_OPT_RESET_NOISE)
185 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
186 SDEBUG_OPT_TRANSPORT_ERR | \
187 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
188 SDEBUG_OPT_SHORT_TRANSFER | \
189 SDEBUG_OPT_HOST_BUSY | \
190 SDEBUG_OPT_CMD_ABORT)
191 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
192 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
195 * priority order. In the subset implemented here lower numbers have higher
196 * priority. The UA numbers should be a sequence starting from 0 with
197 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
198 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
199 #define SDEBUG_UA_BUS_RESET 1
200 #define SDEBUG_UA_MODE_CHANGED 2
201 #define SDEBUG_UA_CAPACITY_CHANGED 3
202 #define SDEBUG_UA_LUNS_CHANGED 4
203 #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
204 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
205 #define SDEBUG_NUM_UAS 7
207 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
208 * sector on read commands: */
209 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
210 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
212 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
213 * (for response) per submit queue at one time. Can be reduced by max_queue
214 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
215 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
216 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
217 * but cannot exceed SDEBUG_CANQUEUE .
219 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
220 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
221 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
223 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
224 #define F_D_IN 1 /* Data-in command (e.g. READ) */
225 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
226 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
228 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
229 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
230 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
231 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
232 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
233 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
234 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
235 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
236 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
237 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
239 /* Useful combinations of the above flags */
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
245 #define SDEBUG_MAX_PARTS 4
247 #define SDEBUG_MAX_CMD_LEN 32
249 #define SDEB_XA_NOT_IN_USE XA_MARK_1
251 /* Zone types (zbcr05 table 25) */
253 ZBC_ZONE_TYPE_CNV = 0x1,
254 ZBC_ZONE_TYPE_SWR = 0x2,
255 ZBC_ZONE_TYPE_SWP = 0x3,
258 /* enumeration names taken from table 26, zbcr05 */
260 ZBC_NOT_WRITE_POINTER = 0x0,
262 ZC2_IMPLICIT_OPEN = 0x2,
263 ZC3_EXPLICIT_OPEN = 0x3,
270 struct sdeb_zone_state { /* ZBC: per zone state */
271 enum sdebug_z_type z_type;
272 enum sdebug_z_cond z_cond;
273 bool z_non_seq_resource;
279 struct sdebug_dev_info {
280 struct list_head dev_list;
281 unsigned int channel;
285 struct sdebug_host_info *sdbg_host;
286 unsigned long uas_bm[1];
288 atomic_t stopped; /* 1: by SSU, 2: device start */
291 /* For ZBC devices */
292 enum blk_zoned_model zmodel;
294 unsigned int zsize_shift;
295 unsigned int nr_zones;
296 unsigned int nr_conv_zones;
297 unsigned int nr_imp_open;
298 unsigned int nr_exp_open;
299 unsigned int nr_closed;
300 unsigned int max_open;
301 ktime_t create_ts; /* time since bootup that this device was created */
302 struct sdeb_zone_state *zstate;
305 struct sdebug_host_info {
306 struct list_head host_list;
307 int si_idx; /* sdeb_store_info (per host) xarray index */
308 struct Scsi_Host *shost;
310 struct list_head dev_info_list;
313 /* There is an xarray of pointers to this struct's objects, one per host */
314 struct sdeb_store_info {
315 rwlock_t macc_lck; /* for atomic media access on this store */
316 u8 *storep; /* user data storage (ram) */
317 struct t10_pi_tuple *dif_storep; /* protection info */
318 void *map_storep; /* provisioning map */
321 #define to_sdebug_host(d) \
322 container_of(d, struct sdebug_host_info, dev)
324 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
325 SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
327 struct sdebug_defer {
329 struct execute_work ew;
330 ktime_t cmpl_ts;/* time since boot to complete this cmd */
331 int sqa_idx; /* index of sdebug_queue array */
332 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
333 int hc_idx; /* hostwide tag index */
338 bool aborted; /* true when blk_abort_request() already called */
339 enum sdeb_defer_type defer_t;
342 struct sdebug_queued_cmd {
343 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
344 * instance indicates this slot is in use.
346 struct sdebug_defer *sd_dp;
347 struct scsi_cmnd *a_cmnd;
350 struct sdebug_queue {
351 struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
352 unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
354 atomic_t blocked; /* to temporarily stop more being queued */
357 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
358 static atomic_t sdebug_completions; /* count of deferred completions */
359 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
360 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
361 static atomic_t sdeb_inject_pending;
362 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
364 struct opcode_info_t {
365 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
366 /* for terminating element */
367 u8 opcode; /* if num_attached > 0, preferred */
368 u16 sa; /* service action */
369 u32 flags; /* OR-ed set of SDEB_F_* */
370 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
371 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
372 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
373 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
376 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
377 enum sdeb_opcode_index {
378 SDEB_I_INVALID_OPCODE = 0,
380 SDEB_I_REPORT_LUNS = 2,
381 SDEB_I_REQUEST_SENSE = 3,
382 SDEB_I_TEST_UNIT_READY = 4,
383 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
384 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
385 SDEB_I_LOG_SENSE = 7,
386 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
387 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
388 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
389 SDEB_I_START_STOP = 11,
390 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
391 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
392 SDEB_I_MAINT_IN = 14,
393 SDEB_I_MAINT_OUT = 15,
394 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
395 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
396 SDEB_I_RESERVE = 18, /* 6, 10 */
397 SDEB_I_RELEASE = 19, /* 6, 10 */
398 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
399 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
400 SDEB_I_ATA_PT = 22, /* 12, 16 */
401 SDEB_I_SEND_DIAG = 23,
403 SDEB_I_WRITE_BUFFER = 25,
404 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
405 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
406 SDEB_I_COMP_WRITE = 28,
407 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
408 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
409 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
410 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
414 static const unsigned char opcode_ind_arr[256] = {
415 /* 0x0; 0x0->0x1f: 6 byte cdbs */
416 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
418 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
419 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
421 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
422 SDEB_I_ALLOW_REMOVAL, 0,
423 /* 0x20; 0x20->0x3f: 10 byte cdbs */
424 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
425 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
426 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
427 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
428 /* 0x40; 0x40->0x5f: 10 byte cdbs */
429 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
430 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
431 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
433 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
434 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
435 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
436 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
437 0, SDEB_I_VARIABLE_LEN,
438 /* 0x80; 0x80->0x9f: 16 byte cdbs */
439 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
440 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
441 0, 0, 0, SDEB_I_VERIFY,
442 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
443 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
444 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
445 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
446 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
447 SDEB_I_MAINT_OUT, 0, 0, 0,
448 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
449 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
450 0, 0, 0, 0, 0, 0, 0, 0,
451 0, 0, 0, 0, 0, 0, 0, 0,
452 /* 0xc0; 0xc0->0xff: vendor specific */
453 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 * The following "response" functions return the SCSI mid-level's 4 byte
461 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
462 * command completion, they can mask their return value with
463 * SDEG_RES_IMMED_MASK .
465 #define SDEG_RES_IMMED_MASK 0x40000000
467 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int sdebug_do_add_host(bool mk_new_store);
498 static int sdebug_add_host_helper(int per_host_idx);
499 static void sdebug_do_remove_host(bool the_end);
500 static int sdebug_add_store(void);
501 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
502 static void sdebug_erase_all_stores(bool apart_from_first);
505 * The following are overflow arrays for cdbs that "hit" the same index in
506 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
507 * should be placed in opcode_info_arr[], the others should be placed here.
509 static const struct opcode_info_t msense_iarr[] = {
510 {0, 0x1a, 0, F_D_IN, NULL, NULL,
511 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
514 static const struct opcode_info_t mselect_iarr[] = {
515 {0, 0x15, 0, F_D_OUT, NULL, NULL,
516 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
519 static const struct opcode_info_t read_iarr[] = {
520 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
521 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
523 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
524 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
526 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
530 static const struct opcode_info_t write_iarr[] = {
531 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
532 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
534 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
535 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
537 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
538 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
539 0xbf, 0xc7, 0, 0, 0, 0} },
542 static const struct opcode_info_t verify_iarr[] = {
543 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
544 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
548 static const struct opcode_info_t sa_in_16_iarr[] = {
549 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
550 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
551 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
554 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
555 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
556 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
557 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
558 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
559 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
560 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
563 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
564 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
565 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
566 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
567 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
568 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
569 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
572 static const struct opcode_info_t write_same_iarr[] = {
573 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
574 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
575 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
578 static const struct opcode_info_t reserve_iarr[] = {
579 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
580 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
583 static const struct opcode_info_t release_iarr[] = {
584 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
585 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
588 static const struct opcode_info_t sync_cache_iarr[] = {
589 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
590 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
591 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
594 static const struct opcode_info_t pre_fetch_iarr[] = {
595 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
596 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
600 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
601 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
602 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
604 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
605 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
607 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
608 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
612 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
613 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
614 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
619 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
620 * plus the terminating elements for logic that scans this table such as
621 * REPORT SUPPORTED OPERATION CODES. */
622 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
624 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
625 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
626 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
627 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
628 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
629 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
630 0, 0} }, /* REPORT LUNS */
631 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
632 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
633 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
634 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
636 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
637 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
638 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
639 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
640 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
641 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
642 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
643 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
645 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
646 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
648 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
649 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
650 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
652 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
653 resp_write_dt0, write_iarr, /* WRITE(16) */
654 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
655 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
656 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
657 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
658 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
659 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
660 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
662 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
663 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
664 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
665 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
666 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
667 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
668 0xff, 0, 0xc7, 0, 0, 0, 0} },
670 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
671 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
672 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
673 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
674 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
675 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
676 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
677 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
678 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
680 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
681 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
682 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
684 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
685 NULL, release_iarr, /* RELEASE(10) <no response function> */
686 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
690 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
691 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
692 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
693 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
694 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
695 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
696 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
698 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
700 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
701 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
702 0, 0, 0, 0} }, /* WRITE_BUFFER */
703 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
704 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
705 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
707 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
708 resp_sync_cache, sync_cache_iarr,
709 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
710 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
711 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
712 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
713 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
714 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
715 resp_pre_fetch, pre_fetch_iarr,
716 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
717 0, 0, 0, 0} }, /* PRE-FETCH (10) */
720 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
721 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
722 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
723 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
724 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
725 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
726 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
727 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
729 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
730 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
733 static int sdebug_num_hosts;
734 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
735 static int sdebug_ato = DEF_ATO;
736 static int sdebug_cdb_len = DEF_CDB_LEN;
737 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
738 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
739 static int sdebug_dif = DEF_DIF;
740 static int sdebug_dix = DEF_DIX;
741 static int sdebug_dsense = DEF_D_SENSE;
742 static int sdebug_every_nth = DEF_EVERY_NTH;
743 static int sdebug_fake_rw = DEF_FAKE_RW;
744 static unsigned int sdebug_guard = DEF_GUARD;
745 static int sdebug_host_max_queue; /* per host */
746 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
747 static int sdebug_max_luns = DEF_MAX_LUNS;
748 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
749 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
750 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
751 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
752 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
753 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
754 static int sdebug_no_uld;
755 static int sdebug_num_parts = DEF_NUM_PARTS;
756 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
757 static int sdebug_opt_blks = DEF_OPT_BLKS;
758 static int sdebug_opts = DEF_OPTS;
759 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
760 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
761 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
762 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
763 static int sdebug_sector_size = DEF_SECTOR_SIZE;
764 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
765 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
766 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
767 static unsigned int sdebug_lbpu = DEF_LBPU;
768 static unsigned int sdebug_lbpws = DEF_LBPWS;
769 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
770 static unsigned int sdebug_lbprz = DEF_LBPRZ;
771 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
772 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
773 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
774 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
775 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
776 static int sdebug_uuid_ctl = DEF_UUID_CTL;
777 static bool sdebug_random = DEF_RANDOM;
778 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
779 static bool sdebug_removable = DEF_REMOVABLE;
780 static bool sdebug_clustering;
781 static bool sdebug_host_lock = DEF_HOST_LOCK;
782 static bool sdebug_strict = DEF_STRICT;
783 static bool sdebug_any_injecting_opt;
784 static bool sdebug_verbose;
785 static bool have_dif_prot;
786 static bool write_since_sync;
787 static bool sdebug_statistics = DEF_STATISTICS;
788 static bool sdebug_wp;
789 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
790 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
791 static char *sdeb_zbc_model_s;
793 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
794 SAM_LUN_AM_FLAT = 0x1,
795 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
796 SAM_LUN_AM_EXTENDED = 0x3};
797 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
798 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
800 static unsigned int sdebug_store_sectors;
801 static sector_t sdebug_capacity; /* in sectors */
803 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
804 may still need them */
805 static int sdebug_heads; /* heads per disk */
806 static int sdebug_cylinders_per; /* cylinders per surface */
807 static int sdebug_sectors_per; /* sectors per cylinder */
809 static LIST_HEAD(sdebug_host_list);
810 static DEFINE_SPINLOCK(sdebug_host_list_lock);
812 static struct xarray per_store_arr;
813 static struct xarray *per_store_ap = &per_store_arr;
814 static int sdeb_first_idx = -1; /* invalid index ==> none created */
815 static int sdeb_most_recent_idx = -1;
816 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
818 static unsigned long map_size;
819 static int num_aborts;
820 static int num_dev_resets;
821 static int num_target_resets;
822 static int num_bus_resets;
823 static int num_host_resets;
824 static int dix_writes;
825 static int dix_reads;
826 static int dif_errors;
828 /* ZBC global data */
829 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
830 static int sdeb_zbc_zone_size_mb;
831 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
832 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
834 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
835 static int poll_queues; /* iouring iopoll interface.*/
836 static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
838 static DEFINE_RWLOCK(atomic_rw);
839 static DEFINE_RWLOCK(atomic_rw2);
841 static rwlock_t *ramdisk_lck_a[2];
843 static char sdebug_proc_name[] = MY_NAME;
844 static const char *my_name = MY_NAME;
846 static struct bus_type pseudo_lld_bus;
848 static struct device_driver sdebug_driverfs_driver = {
849 .name = sdebug_proc_name,
850 .bus = &pseudo_lld_bus,
853 static const int check_condition_result =
854 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
856 static const int illegal_condition_result =
857 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
859 static const int device_qfull_result =
860 (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
862 static const int condition_met_result = SAM_STAT_CONDITION_MET;
865 /* Only do the extra work involved in logical block provisioning if one or
866 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
867 * real reads and writes (i.e. not skipping them for speed).
869 static inline bool scsi_debug_lbp(void)
871 return 0 == sdebug_fake_rw &&
872 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
875 static void *lba2fake_store(struct sdeb_store_info *sip,
876 unsigned long long lba)
878 struct sdeb_store_info *lsip = sip;
880 lba = do_div(lba, sdebug_store_sectors);
881 if (!sip || !sip->storep) {
883 lsip = xa_load(per_store_ap, 0); /* should never be NULL */
885 return lsip->storep + lba * sdebug_sector_size;
888 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
891 sector = sector_div(sector, sdebug_store_sectors);
893 return sip->dif_storep + sector;
896 static void sdebug_max_tgts_luns(void)
898 struct sdebug_host_info *sdbg_host;
899 struct Scsi_Host *hpnt;
901 spin_lock(&sdebug_host_list_lock);
902 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
903 hpnt = sdbg_host->shost;
904 if ((hpnt->this_id >= 0) &&
905 (sdebug_num_tgts > hpnt->this_id))
906 hpnt->max_id = sdebug_num_tgts + 1;
908 hpnt->max_id = sdebug_num_tgts;
909 /* sdebug_max_luns; */
910 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
912 spin_unlock(&sdebug_host_list_lock);
915 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
917 /* Set in_bit to -1 to indicate no bit position of invalid field */
918 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
919 enum sdeb_cmd_data c_d,
920 int in_byte, int in_bit)
922 unsigned char *sbuff;
926 sbuff = scp->sense_buffer;
928 sdev_printk(KERN_ERR, scp->device,
929 "%s: sense_buffer is NULL\n", __func__);
932 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
933 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
934 scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
935 memset(sks, 0, sizeof(sks));
941 sks[0] |= 0x7 & in_bit;
943 put_unaligned_be16(in_byte, sks + 1);
949 memcpy(sbuff + sl + 4, sks, 3);
951 memcpy(sbuff + 15, sks, 3);
953 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
954 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
955 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
958 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
960 unsigned char *sbuff;
962 sbuff = scp->sense_buffer;
964 sdev_printk(KERN_ERR, scp->device,
965 "%s: sense_buffer is NULL\n", __func__);
968 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
970 scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
973 sdev_printk(KERN_INFO, scp->device,
974 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
975 my_name, key, asc, asq);
978 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
980 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
983 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
986 if (sdebug_verbose) {
988 sdev_printk(KERN_INFO, dev,
989 "%s: BLKFLSBUF [0x1261]\n", __func__);
990 else if (0x5331 == cmd)
991 sdev_printk(KERN_INFO, dev,
992 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
995 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
999 /* return -ENOTTY; // correct return but upsets fdisk */
1002 static void config_cdb_len(struct scsi_device *sdev)
1004 switch (sdebug_cdb_len) {
1005 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1006 sdev->use_10_for_rw = false;
1007 sdev->use_16_for_rw = false;
1008 sdev->use_10_for_ms = false;
1010 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1011 sdev->use_10_for_rw = true;
1012 sdev->use_16_for_rw = false;
1013 sdev->use_10_for_ms = false;
1015 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1016 sdev->use_10_for_rw = true;
1017 sdev->use_16_for_rw = false;
1018 sdev->use_10_for_ms = true;
1021 sdev->use_10_for_rw = false;
1022 sdev->use_16_for_rw = true;
1023 sdev->use_10_for_ms = true;
1025 case 32: /* No knobs to suggest this so same as 16 for now */
1026 sdev->use_10_for_rw = false;
1027 sdev->use_16_for_rw = true;
1028 sdev->use_10_for_ms = true;
1031 pr_warn("unexpected cdb_len=%d, force to 10\n",
1033 sdev->use_10_for_rw = true;
1034 sdev->use_16_for_rw = false;
1035 sdev->use_10_for_ms = false;
1036 sdebug_cdb_len = 10;
1041 static void all_config_cdb_len(void)
1043 struct sdebug_host_info *sdbg_host;
1044 struct Scsi_Host *shost;
1045 struct scsi_device *sdev;
1047 spin_lock(&sdebug_host_list_lock);
1048 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1049 shost = sdbg_host->shost;
1050 shost_for_each_device(sdev, shost) {
1051 config_cdb_len(sdev);
1054 spin_unlock(&sdebug_host_list_lock);
1057 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1059 struct sdebug_host_info *sdhp;
1060 struct sdebug_dev_info *dp;
1062 spin_lock(&sdebug_host_list_lock);
1063 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1064 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1065 if ((devip->sdbg_host == dp->sdbg_host) &&
1066 (devip->target == dp->target))
1067 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1070 spin_unlock(&sdebug_host_list_lock);
1073 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1077 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1078 if (k != SDEBUG_NUM_UAS) {
1079 const char *cp = NULL;
1083 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1084 POWER_ON_RESET_ASCQ);
1086 cp = "power on reset";
1088 case SDEBUG_UA_BUS_RESET:
1089 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1094 case SDEBUG_UA_MODE_CHANGED:
1095 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1098 cp = "mode parameters changed";
1100 case SDEBUG_UA_CAPACITY_CHANGED:
1101 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1102 CAPACITY_CHANGED_ASCQ);
1104 cp = "capacity data changed";
1106 case SDEBUG_UA_MICROCODE_CHANGED:
1107 mk_sense_buffer(scp, UNIT_ATTENTION,
1109 MICROCODE_CHANGED_ASCQ);
1111 cp = "microcode has been changed";
1113 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1114 mk_sense_buffer(scp, UNIT_ATTENTION,
1116 MICROCODE_CHANGED_WO_RESET_ASCQ);
1118 cp = "microcode has been changed without reset";
1120 case SDEBUG_UA_LUNS_CHANGED:
1122 * SPC-3 behavior is to report a UNIT ATTENTION with
1123 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1124 * on the target, until a REPORT LUNS command is
1125 * received. SPC-4 behavior is to report it only once.
1126 * NOTE: sdebug_scsi_level does not use the same
1127 * values as struct scsi_device->scsi_level.
1129 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1130 clear_luns_changed_on_target(devip);
1131 mk_sense_buffer(scp, UNIT_ATTENTION,
1135 cp = "reported luns data has changed";
1138 pr_warn("unexpected unit attention code=%d\n", k);
1143 clear_bit(k, devip->uas_bm);
1145 sdev_printk(KERN_INFO, scp->device,
1146 "%s reports: Unit attention: %s\n",
1148 return check_condition_result;
1153 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1154 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1158 struct scsi_data_buffer *sdb = &scp->sdb;
1162 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1163 return DID_ERROR << 16;
1165 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1167 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1172 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1173 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1174 * calls, not required to write in ascending offset order. Assumes resid
1175 * set to scsi_bufflen() prior to any calls.
1177 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1178 int arr_len, unsigned int off_dst)
1180 unsigned int act_len, n;
1181 struct scsi_data_buffer *sdb = &scp->sdb;
1182 off_t skip = off_dst;
1184 if (sdb->length <= off_dst)
1186 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1187 return DID_ERROR << 16;
1189 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1190 arr, arr_len, skip);
1191 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1192 __func__, off_dst, scsi_bufflen(scp), act_len,
1193 scsi_get_resid(scp));
1194 n = scsi_bufflen(scp) - (off_dst + act_len);
1195 scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1199 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1200 * 'arr' or -1 if error.
1202 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1205 if (!scsi_bufflen(scp))
1207 if (scp->sc_data_direction != DMA_TO_DEVICE)
1210 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1214 static char sdebug_inq_vendor_id[9] = "Linux ";
1215 static char sdebug_inq_product_id[17] = "scsi_debug ";
1216 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1217 /* Use some locally assigned NAAs for SAS addresses. */
1218 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1219 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1220 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1222 /* Device identification VPD page. Returns number of bytes placed in arr */
1223 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1224 int target_dev_id, int dev_id_num,
1225 const char *dev_id_str, int dev_id_str_len,
1226 const uuid_t *lu_name)
1231 port_a = target_dev_id + 1;
1232 /* T10 vendor identifier field format (faked) */
1233 arr[0] = 0x2; /* ASCII */
1236 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1237 memcpy(&arr[12], sdebug_inq_product_id, 16);
1238 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1239 num = 8 + 16 + dev_id_str_len;
1242 if (dev_id_num >= 0) {
1243 if (sdebug_uuid_ctl) {
1244 /* Locally assigned UUID */
1245 arr[num++] = 0x1; /* binary (not necessarily sas) */
1246 arr[num++] = 0xa; /* PIV=0, lu, naa */
1249 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1251 memcpy(arr + num, lu_name, 16);
1254 /* NAA-3, Logical unit identifier (binary) */
1255 arr[num++] = 0x1; /* binary (not necessarily sas) */
1256 arr[num++] = 0x3; /* PIV=0, lu, naa */
1259 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1262 /* Target relative port number */
1263 arr[num++] = 0x61; /* proto=sas, binary */
1264 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1265 arr[num++] = 0x0; /* reserved */
1266 arr[num++] = 0x4; /* length */
1267 arr[num++] = 0x0; /* reserved */
1268 arr[num++] = 0x0; /* reserved */
1270 arr[num++] = 0x1; /* relative port A */
1272 /* NAA-3, Target port identifier */
1273 arr[num++] = 0x61; /* proto=sas, binary */
1274 arr[num++] = 0x93; /* piv=1, target port, naa */
1277 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1279 /* NAA-3, Target port group identifier */
1280 arr[num++] = 0x61; /* proto=sas, binary */
1281 arr[num++] = 0x95; /* piv=1, target port group id */
1286 put_unaligned_be16(port_group_id, arr + num);
1288 /* NAA-3, Target device identifier */
1289 arr[num++] = 0x61; /* proto=sas, binary */
1290 arr[num++] = 0xa3; /* piv=1, target device, naa */
1293 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1295 /* SCSI name string: Target device identifier */
1296 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1297 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1300 memcpy(arr + num, "naa.32222220", 12);
1302 snprintf(b, sizeof(b), "%08X", target_dev_id);
1303 memcpy(arr + num, b, 8);
1305 memset(arr + num, 0, 4);
1310 static unsigned char vpd84_data[] = {
1311 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1312 0x22,0x22,0x22,0x0,0xbb,0x1,
1313 0x22,0x22,0x22,0x0,0xbb,0x2,
1316 /* Software interface identification VPD page */
1317 static int inquiry_vpd_84(unsigned char *arr)
1319 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1320 return sizeof(vpd84_data);
1323 /* Management network addresses VPD page */
1324 static int inquiry_vpd_85(unsigned char *arr)
1327 const char *na1 = "https://www.kernel.org/config";
1328 const char *na2 = "http://www.kernel.org/log";
1331 arr[num++] = 0x1; /* lu, storage config */
1332 arr[num++] = 0x0; /* reserved */
1337 plen = ((plen / 4) + 1) * 4;
1338 arr[num++] = plen; /* length, null termianted, padded */
1339 memcpy(arr + num, na1, olen);
1340 memset(arr + num + olen, 0, plen - olen);
1343 arr[num++] = 0x4; /* lu, logging */
1344 arr[num++] = 0x0; /* reserved */
1349 plen = ((plen / 4) + 1) * 4;
1350 arr[num++] = plen; /* length, null terminated, padded */
1351 memcpy(arr + num, na2, olen);
1352 memset(arr + num + olen, 0, plen - olen);
1358 /* SCSI ports VPD page */
1359 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1364 port_a = target_dev_id + 1;
1365 port_b = port_a + 1;
1366 arr[num++] = 0x0; /* reserved */
1367 arr[num++] = 0x0; /* reserved */
1369 arr[num++] = 0x1; /* relative port 1 (primary) */
1370 memset(arr + num, 0, 6);
1373 arr[num++] = 12; /* length tp descriptor */
1374 /* naa-5 target port identifier (A) */
1375 arr[num++] = 0x61; /* proto=sas, binary */
1376 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1377 arr[num++] = 0x0; /* reserved */
1378 arr[num++] = 0x8; /* length */
1379 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1381 arr[num++] = 0x0; /* reserved */
1382 arr[num++] = 0x0; /* reserved */
1384 arr[num++] = 0x2; /* relative port 2 (secondary) */
1385 memset(arr + num, 0, 6);
1388 arr[num++] = 12; /* length tp descriptor */
1389 /* naa-5 target port identifier (B) */
1390 arr[num++] = 0x61; /* proto=sas, binary */
1391 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1392 arr[num++] = 0x0; /* reserved */
1393 arr[num++] = 0x8; /* length */
1394 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1401 static unsigned char vpd89_data[] = {
1402 /* from 4th byte */ 0,0,0,0,
1403 'l','i','n','u','x',' ',' ',' ',
1404 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1406 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1408 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1409 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1410 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1411 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1413 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1415 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1417 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1418 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1419 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1420 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1421 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1422 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1423 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1424 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1425 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1426 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1427 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1428 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1429 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1430 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1445 /* ATA Information VPD page */
1446 static int inquiry_vpd_89(unsigned char *arr)
1448 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1449 return sizeof(vpd89_data);
1453 static unsigned char vpdb0_data[] = {
1454 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1455 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1456 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1457 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1460 /* Block limits VPD page (SBC-3) */
1461 static int inquiry_vpd_b0(unsigned char *arr)
1465 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1467 /* Optimal transfer length granularity */
1468 if (sdebug_opt_xferlen_exp != 0 &&
1469 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1470 gran = 1 << sdebug_opt_xferlen_exp;
1472 gran = 1 << sdebug_physblk_exp;
1473 put_unaligned_be16(gran, arr + 2);
1475 /* Maximum Transfer Length */
1476 if (sdebug_store_sectors > 0x400)
1477 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1479 /* Optimal Transfer Length */
1480 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1483 /* Maximum Unmap LBA Count */
1484 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1486 /* Maximum Unmap Block Descriptor Count */
1487 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1490 /* Unmap Granularity Alignment */
1491 if (sdebug_unmap_alignment) {
1492 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1493 arr[28] |= 0x80; /* UGAVALID */
1496 /* Optimal Unmap Granularity */
1497 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1499 /* Maximum WRITE SAME Length */
1500 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1502 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1504 return sizeof(vpdb0_data);
1507 /* Block device characteristics VPD page (SBC-3) */
1508 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1510 memset(arr, 0, 0x3c);
1512 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1514 arr[3] = 5; /* less than 1.8" */
1515 if (devip->zmodel == BLK_ZONED_HA)
1516 arr[4] = 1 << 4; /* zoned field = 01b */
1521 /* Logical block provisioning VPD page (SBC-4) */
1522 static int inquiry_vpd_b2(unsigned char *arr)
1524 memset(arr, 0, 0x4);
1525 arr[0] = 0; /* threshold exponent */
1532 if (sdebug_lbprz && scsi_debug_lbp())
1533 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1534 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1535 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1536 /* threshold_percentage=0 */
1540 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1541 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1543 memset(arr, 0, 0x3c);
1544 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1546 * Set Optimal number of open sequential write preferred zones and
1547 * Optimal number of non-sequentially written sequential write
1548 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1549 * fields set to zero, apart from Max. number of open swrz_s field.
1551 put_unaligned_be32(0xffffffff, &arr[4]);
1552 put_unaligned_be32(0xffffffff, &arr[8]);
1553 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1554 put_unaligned_be32(devip->max_open, &arr[12]);
1556 put_unaligned_be32(0xffffffff, &arr[12]);
1560 #define SDEBUG_LONG_INQ_SZ 96
1561 #define SDEBUG_MAX_INQ_ARR_SZ 584
1563 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1565 unsigned char pq_pdt;
1567 unsigned char *cmd = scp->cmnd;
1568 int alloc_len, n, ret;
1569 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1571 alloc_len = get_unaligned_be16(cmd + 3);
1572 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1574 return DID_REQUEUE << 16;
1575 is_disk = (sdebug_ptype == TYPE_DISK);
1576 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1577 is_disk_zbc = (is_disk || is_zbc);
1578 have_wlun = scsi_is_wlun(scp->device->lun);
1580 pq_pdt = TYPE_WLUN; /* present, wlun */
1581 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1582 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1584 pq_pdt = (sdebug_ptype & 0x1f);
1586 if (0x2 & cmd[1]) { /* CMDDT bit set */
1587 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1589 return check_condition_result;
1590 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1591 int lu_id_num, port_group_id, target_dev_id, len;
1593 int host_no = devip->sdbg_host->shost->host_no;
1595 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1596 (devip->channel & 0x7f);
1597 if (sdebug_vpd_use_hostno == 0)
1599 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1600 (devip->target * 1000) + devip->lun);
1601 target_dev_id = ((host_no + 1) * 2000) +
1602 (devip->target * 1000) - 3;
1603 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1604 if (0 == cmd[2]) { /* supported vital product data pages */
1605 arr[1] = cmd[2]; /*sanity */
1607 arr[n++] = 0x0; /* this page */
1608 arr[n++] = 0x80; /* unit serial number */
1609 arr[n++] = 0x83; /* device identification */
1610 arr[n++] = 0x84; /* software interface ident. */
1611 arr[n++] = 0x85; /* management network addresses */
1612 arr[n++] = 0x86; /* extended inquiry */
1613 arr[n++] = 0x87; /* mode page policy */
1614 arr[n++] = 0x88; /* SCSI ports */
1615 if (is_disk_zbc) { /* SBC or ZBC */
1616 arr[n++] = 0x89; /* ATA information */
1617 arr[n++] = 0xb0; /* Block limits */
1618 arr[n++] = 0xb1; /* Block characteristics */
1620 arr[n++] = 0xb2; /* LB Provisioning */
1622 arr[n++] = 0xb6; /* ZB dev. char. */
1624 arr[3] = n - 4; /* number of supported VPD pages */
1625 } else if (0x80 == cmd[2]) { /* unit serial number */
1626 arr[1] = cmd[2]; /*sanity */
1628 memcpy(&arr[4], lu_id_str, len);
1629 } else if (0x83 == cmd[2]) { /* device identification */
1630 arr[1] = cmd[2]; /*sanity */
1631 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1632 target_dev_id, lu_id_num,
1635 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1636 arr[1] = cmd[2]; /*sanity */
1637 arr[3] = inquiry_vpd_84(&arr[4]);
1638 } else if (0x85 == cmd[2]) { /* Management network addresses */
1639 arr[1] = cmd[2]; /*sanity */
1640 arr[3] = inquiry_vpd_85(&arr[4]);
1641 } else if (0x86 == cmd[2]) { /* extended inquiry */
1642 arr[1] = cmd[2]; /*sanity */
1643 arr[3] = 0x3c; /* number of following entries */
1644 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1645 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1646 else if (have_dif_prot)
1647 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1649 arr[4] = 0x0; /* no protection stuff */
1650 arr[5] = 0x7; /* head of q, ordered + simple q's */
1651 } else if (0x87 == cmd[2]) { /* mode page policy */
1652 arr[1] = cmd[2]; /*sanity */
1653 arr[3] = 0x8; /* number of following entries */
1654 arr[4] = 0x2; /* disconnect-reconnect mp */
1655 arr[6] = 0x80; /* mlus, shared */
1656 arr[8] = 0x18; /* protocol specific lu */
1657 arr[10] = 0x82; /* mlus, per initiator port */
1658 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1659 arr[1] = cmd[2]; /*sanity */
1660 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1661 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1662 arr[1] = cmd[2]; /*sanity */
1663 n = inquiry_vpd_89(&arr[4]);
1664 put_unaligned_be16(n, arr + 2);
1665 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1666 arr[1] = cmd[2]; /*sanity */
1667 arr[3] = inquiry_vpd_b0(&arr[4]);
1668 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1669 arr[1] = cmd[2]; /*sanity */
1670 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1671 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1672 arr[1] = cmd[2]; /*sanity */
1673 arr[3] = inquiry_vpd_b2(&arr[4]);
1674 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1675 arr[1] = cmd[2]; /*sanity */
1676 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1678 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1680 return check_condition_result;
1682 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1683 ret = fill_from_dev_buffer(scp, arr,
1684 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1688 /* drops through here for a standard inquiry */
1689 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1690 arr[2] = sdebug_scsi_level;
1691 arr[3] = 2; /* response_data_format==2 */
1692 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1693 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1694 if (sdebug_vpd_use_hostno == 0)
1695 arr[5] |= 0x10; /* claim: implicit TPGS */
1696 arr[6] = 0x10; /* claim: MultiP */
1697 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1698 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1699 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1700 memcpy(&arr[16], sdebug_inq_product_id, 16);
1701 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1702 /* Use Vendor Specific area to place driver date in ASCII hex */
1703 memcpy(&arr[36], sdebug_version_date, 8);
1704 /* version descriptors (2 bytes each) follow */
1705 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1706 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1708 if (is_disk) { /* SBC-4 no version claimed */
1709 put_unaligned_be16(0x600, arr + n);
1711 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1712 put_unaligned_be16(0x525, arr + n);
1714 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1715 put_unaligned_be16(0x624, arr + n);
1718 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1719 ret = fill_from_dev_buffer(scp, arr,
1720 min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1725 /* See resp_iec_m_pg() for how this data is manipulated */
1726 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1729 static int resp_requests(struct scsi_cmnd *scp,
1730 struct sdebug_dev_info *devip)
1732 unsigned char *cmd = scp->cmnd;
1733 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1734 bool dsense = !!(cmd[1] & 1);
1735 int alloc_len = cmd[4];
1737 int stopped_state = atomic_read(&devip->stopped);
1739 memset(arr, 0, sizeof(arr));
1740 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1744 arr[2] = LOGICAL_UNIT_NOT_READY;
1745 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1749 arr[2] = NOT_READY; /* NO_SENSE in sense_key */
1750 arr[7] = 0xa; /* 18 byte sense buffer */
1751 arr[12] = LOGICAL_UNIT_NOT_READY;
1752 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1754 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1755 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1758 arr[1] = 0x0; /* NO_SENSE in sense_key */
1759 arr[2] = THRESHOLD_EXCEEDED;
1760 arr[3] = 0xff; /* Failure prediction(false) */
1764 arr[2] = 0x0; /* NO_SENSE in sense_key */
1765 arr[7] = 0xa; /* 18 byte sense buffer */
1766 arr[12] = THRESHOLD_EXCEEDED;
1767 arr[13] = 0xff; /* Failure prediction(false) */
1769 } else { /* nothing to report */
1772 memset(arr, 0, len);
1775 memset(arr, 0, len);
1780 return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
1783 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1785 unsigned char *cmd = scp->cmnd;
1786 int power_cond, want_stop, stopped_state;
1789 power_cond = (cmd[4] & 0xf0) >> 4;
1791 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1792 return check_condition_result;
1794 want_stop = !(cmd[4] & 1);
1795 stopped_state = atomic_read(&devip->stopped);
1796 if (stopped_state == 2) {
1797 ktime_t now_ts = ktime_get_boottime();
1799 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1800 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1802 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1803 /* tur_ms_to_ready timer extinguished */
1804 atomic_set(&devip->stopped, 0);
1808 if (stopped_state == 2) {
1810 stopped_state = 1; /* dummy up success */
1811 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1812 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1813 return check_condition_result;
1817 changing = (stopped_state != want_stop);
1819 atomic_xchg(&devip->stopped, want_stop);
1820 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1821 return SDEG_RES_IMMED_MASK;
1826 static sector_t get_sdebug_capacity(void)
1828 static const unsigned int gibibyte = 1073741824;
1830 if (sdebug_virtual_gb > 0)
1831 return (sector_t)sdebug_virtual_gb *
1832 (gibibyte / sdebug_sector_size);
1834 return sdebug_store_sectors;
1837 #define SDEBUG_READCAP_ARR_SZ 8
1838 static int resp_readcap(struct scsi_cmnd *scp,
1839 struct sdebug_dev_info *devip)
1841 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1844 /* following just in case virtual_gb changed */
1845 sdebug_capacity = get_sdebug_capacity();
1846 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1847 if (sdebug_capacity < 0xffffffff) {
1848 capac = (unsigned int)sdebug_capacity - 1;
1849 put_unaligned_be32(capac, arr + 0);
1851 put_unaligned_be32(0xffffffff, arr + 0);
1852 put_unaligned_be16(sdebug_sector_size, arr + 6);
1853 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1856 #define SDEBUG_READCAP16_ARR_SZ 32
1857 static int resp_readcap16(struct scsi_cmnd *scp,
1858 struct sdebug_dev_info *devip)
1860 unsigned char *cmd = scp->cmnd;
1861 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1864 alloc_len = get_unaligned_be32(cmd + 10);
1865 /* following just in case virtual_gb changed */
1866 sdebug_capacity = get_sdebug_capacity();
1867 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1868 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1869 put_unaligned_be32(sdebug_sector_size, arr + 8);
1870 arr[13] = sdebug_physblk_exp & 0xf;
1871 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1873 if (scsi_debug_lbp()) {
1874 arr[14] |= 0x80; /* LBPME */
1875 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1876 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1877 * in the wider field maps to 0 in this field.
1879 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1883 arr[15] = sdebug_lowest_aligned & 0xff;
1885 if (have_dif_prot) {
1886 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1887 arr[12] |= 1; /* PROT_EN */
1890 return fill_from_dev_buffer(scp, arr,
1891 min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1894 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1896 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1897 struct sdebug_dev_info *devip)
1899 unsigned char *cmd = scp->cmnd;
1901 int host_no = devip->sdbg_host->shost->host_no;
1902 int n, ret, alen, rlen;
1903 int port_group_a, port_group_b, port_a, port_b;
1905 alen = get_unaligned_be32(cmd + 6);
1906 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1908 return DID_REQUEUE << 16;
1910 * EVPD page 0x88 states we have two ports, one
1911 * real and a fake port with no device connected.
1912 * So we create two port groups with one port each
1913 * and set the group with port B to unavailable.
1915 port_a = 0x1; /* relative port A */
1916 port_b = 0x2; /* relative port B */
1917 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1918 (devip->channel & 0x7f);
1919 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1920 (devip->channel & 0x7f) + 0x80;
1923 * The asymmetric access state is cycled according to the host_id.
1926 if (sdebug_vpd_use_hostno == 0) {
1927 arr[n++] = host_no % 3; /* Asymm access state */
1928 arr[n++] = 0x0F; /* claim: all states are supported */
1930 arr[n++] = 0x0; /* Active/Optimized path */
1931 arr[n++] = 0x01; /* only support active/optimized paths */
1933 put_unaligned_be16(port_group_a, arr + n);
1935 arr[n++] = 0; /* Reserved */
1936 arr[n++] = 0; /* Status code */
1937 arr[n++] = 0; /* Vendor unique */
1938 arr[n++] = 0x1; /* One port per group */
1939 arr[n++] = 0; /* Reserved */
1940 arr[n++] = 0; /* Reserved */
1941 put_unaligned_be16(port_a, arr + n);
1943 arr[n++] = 3; /* Port unavailable */
1944 arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1945 put_unaligned_be16(port_group_b, arr + n);
1947 arr[n++] = 0; /* Reserved */
1948 arr[n++] = 0; /* Status code */
1949 arr[n++] = 0; /* Vendor unique */
1950 arr[n++] = 0x1; /* One port per group */
1951 arr[n++] = 0; /* Reserved */
1952 arr[n++] = 0; /* Reserved */
1953 put_unaligned_be16(port_b, arr + n);
1957 put_unaligned_be32(rlen, arr + 0);
1960 * Return the smallest value of either
1961 * - The allocated length
1962 * - The constructed command length
1963 * - The maximum array size
1965 rlen = min_t(int, alen, n);
1966 ret = fill_from_dev_buffer(scp, arr,
1967 min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1972 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1973 struct sdebug_dev_info *devip)
1976 u8 reporting_opts, req_opcode, sdeb_i, supp;
1978 u32 alloc_len, a_len;
1979 int k, offset, len, errsts, count, bump, na;
1980 const struct opcode_info_t *oip;
1981 const struct opcode_info_t *r_oip;
1983 u8 *cmd = scp->cmnd;
1985 rctd = !!(cmd[2] & 0x80);
1986 reporting_opts = cmd[2] & 0x7;
1987 req_opcode = cmd[3];
1988 req_sa = get_unaligned_be16(cmd + 4);
1989 alloc_len = get_unaligned_be32(cmd + 6);
1990 if (alloc_len < 4 || alloc_len > 0xffff) {
1991 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1992 return check_condition_result;
1994 if (alloc_len > 8192)
1998 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2000 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2002 return check_condition_result;
2004 switch (reporting_opts) {
2005 case 0: /* all commands */
2006 /* count number of commands */
2007 for (count = 0, oip = opcode_info_arr;
2008 oip->num_attached != 0xff; ++oip) {
2009 if (F_INV_OP & oip->flags)
2011 count += (oip->num_attached + 1);
2013 bump = rctd ? 20 : 8;
2014 put_unaligned_be32(count * bump, arr);
2015 for (offset = 4, oip = opcode_info_arr;
2016 oip->num_attached != 0xff && offset < a_len; ++oip) {
2017 if (F_INV_OP & oip->flags)
2019 na = oip->num_attached;
2020 arr[offset] = oip->opcode;
2021 put_unaligned_be16(oip->sa, arr + offset + 2);
2023 arr[offset + 5] |= 0x2;
2024 if (FF_SA & oip->flags)
2025 arr[offset + 5] |= 0x1;
2026 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2028 put_unaligned_be16(0xa, arr + offset + 8);
2030 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2031 if (F_INV_OP & oip->flags)
2034 arr[offset] = oip->opcode;
2035 put_unaligned_be16(oip->sa, arr + offset + 2);
2037 arr[offset + 5] |= 0x2;
2038 if (FF_SA & oip->flags)
2039 arr[offset + 5] |= 0x1;
2040 put_unaligned_be16(oip->len_mask[0],
2043 put_unaligned_be16(0xa,
2050 case 1: /* one command: opcode only */
2051 case 2: /* one command: opcode plus service action */
2052 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2053 sdeb_i = opcode_ind_arr[req_opcode];
2054 oip = &opcode_info_arr[sdeb_i];
2055 if (F_INV_OP & oip->flags) {
2059 if (1 == reporting_opts) {
2060 if (FF_SA & oip->flags) {
2061 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2064 return check_condition_result;
2067 } else if (2 == reporting_opts &&
2068 0 == (FF_SA & oip->flags)) {
2069 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2070 kfree(arr); /* point at requested sa */
2071 return check_condition_result;
2073 if (0 == (FF_SA & oip->flags) &&
2074 req_opcode == oip->opcode)
2076 else if (0 == (FF_SA & oip->flags)) {
2077 na = oip->num_attached;
2078 for (k = 0, oip = oip->arrp; k < na;
2080 if (req_opcode == oip->opcode)
2083 supp = (k >= na) ? 1 : 3;
2084 } else if (req_sa != oip->sa) {
2085 na = oip->num_attached;
2086 for (k = 0, oip = oip->arrp; k < na;
2088 if (req_sa == oip->sa)
2091 supp = (k >= na) ? 1 : 3;
2095 u = oip->len_mask[0];
2096 put_unaligned_be16(u, arr + 2);
2097 arr[4] = oip->opcode;
2098 for (k = 1; k < u; ++k)
2099 arr[4 + k] = (k < 16) ?
2100 oip->len_mask[k] : 0xff;
2105 arr[1] = (rctd ? 0x80 : 0) | supp;
2107 put_unaligned_be16(0xa, arr + offset);
2112 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2114 return check_condition_result;
2116 offset = (offset < a_len) ? offset : a_len;
2117 len = (offset < alloc_len) ? offset : alloc_len;
2118 errsts = fill_from_dev_buffer(scp, arr, len);
2123 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2124 struct sdebug_dev_info *devip)
2129 u8 *cmd = scp->cmnd;
2131 memset(arr, 0, sizeof(arr));
2132 repd = !!(cmd[2] & 0x80);
2133 alloc_len = get_unaligned_be32(cmd + 6);
2134 if (alloc_len < 4) {
2135 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2136 return check_condition_result;
2138 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2139 arr[1] = 0x1; /* ITNRS */
2146 len = (len < alloc_len) ? len : alloc_len;
2147 return fill_from_dev_buffer(scp, arr, len);
2150 /* <<Following mode page info copied from ST318451LW>> */
2152 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2153 { /* Read-Write Error Recovery page for mode_sense */
2154 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2157 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2159 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2160 return sizeof(err_recov_pg);
2163 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2164 { /* Disconnect-Reconnect page for mode_sense */
2165 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2166 0, 0, 0, 0, 0, 0, 0, 0};
2168 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2170 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2171 return sizeof(disconnect_pg);
2174 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2175 { /* Format device page for mode_sense */
2176 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2177 0, 0, 0, 0, 0, 0, 0, 0,
2178 0, 0, 0, 0, 0x40, 0, 0, 0};
2180 memcpy(p, format_pg, sizeof(format_pg));
2181 put_unaligned_be16(sdebug_sectors_per, p + 10);
2182 put_unaligned_be16(sdebug_sector_size, p + 12);
2183 if (sdebug_removable)
2184 p[20] |= 0x20; /* should agree with INQUIRY */
2186 memset(p + 2, 0, sizeof(format_pg) - 2);
2187 return sizeof(format_pg);
2190 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2191 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2194 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2195 { /* Caching page for mode_sense */
2196 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2197 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2198 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2199 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2201 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2202 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2203 memcpy(p, caching_pg, sizeof(caching_pg));
2205 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2206 else if (2 == pcontrol)
2207 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2208 return sizeof(caching_pg);
2211 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2214 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2215 { /* Control mode page for mode_sense */
2216 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2218 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2222 ctrl_m_pg[2] |= 0x4;
2224 ctrl_m_pg[2] &= ~0x4;
2227 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2229 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2231 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2232 else if (2 == pcontrol)
2233 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2234 return sizeof(ctrl_m_pg);
2238 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2239 { /* Informational Exceptions control mode page for mode_sense */
2240 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2242 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2245 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2247 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2248 else if (2 == pcontrol)
2249 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2250 return sizeof(iec_m_pg);
2253 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2254 { /* SAS SSP mode page - short format for mode_sense */
2255 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2256 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2258 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2260 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2261 return sizeof(sas_sf_m_pg);
2265 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2267 { /* SAS phy control and discover mode page for mode_sense */
2268 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2269 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2270 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2271 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2272 0x2, 0, 0, 0, 0, 0, 0, 0,
2273 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2274 0, 0, 0, 0, 0, 0, 0, 0,
2275 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2276 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2277 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2278 0x3, 0, 0, 0, 0, 0, 0, 0,
2279 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2280 0, 0, 0, 0, 0, 0, 0, 0,
2284 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2285 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2286 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2287 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2288 port_a = target_dev_id + 1;
2289 port_b = port_a + 1;
2290 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2291 put_unaligned_be32(port_a, p + 20);
2292 put_unaligned_be32(port_b, p + 48 + 20);
2294 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2295 return sizeof(sas_pcd_m_pg);
2298 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2299 { /* SAS SSP shared protocol specific port mode subpage */
2300 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2301 0, 0, 0, 0, 0, 0, 0, 0,
2304 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2306 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2307 return sizeof(sas_sha_m_pg);
2310 #define SDEBUG_MAX_MSENSE_SZ 256
2312 static int resp_mode_sense(struct scsi_cmnd *scp,
2313 struct sdebug_dev_info *devip)
2315 int pcontrol, pcode, subpcode, bd_len;
2316 unsigned char dev_spec;
2317 int alloc_len, offset, len, target_dev_id;
2318 int target = scp->device->id;
2320 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2321 unsigned char *cmd = scp->cmnd;
2322 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2324 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2325 pcontrol = (cmd[2] & 0xc0) >> 6;
2326 pcode = cmd[2] & 0x3f;
2328 msense_6 = (MODE_SENSE == cmd[0]);
2329 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2330 is_disk = (sdebug_ptype == TYPE_DISK);
2331 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2332 if ((is_disk || is_zbc) && !dbd)
2333 bd_len = llbaa ? 16 : 8;
2336 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2337 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2338 if (0x3 == pcontrol) { /* Saving values not supported */
2339 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2340 return check_condition_result;
2342 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2343 (devip->target * 1000) - 3;
2344 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2345 if (is_disk || is_zbc) {
2346 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2358 arr[4] = 0x1; /* set LONGLBA bit */
2359 arr[7] = bd_len; /* assume 255 or less */
2363 if ((bd_len > 0) && (!sdebug_capacity))
2364 sdebug_capacity = get_sdebug_capacity();
2367 if (sdebug_capacity > 0xfffffffe)
2368 put_unaligned_be32(0xffffffff, ap + 0);
2370 put_unaligned_be32(sdebug_capacity, ap + 0);
2371 put_unaligned_be16(sdebug_sector_size, ap + 6);
2374 } else if (16 == bd_len) {
2375 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2376 put_unaligned_be32(sdebug_sector_size, ap + 12);
2381 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2382 /* TODO: Control Extension page */
2383 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2384 return check_condition_result;
2389 case 0x1: /* Read-Write error recovery page, direct access */
2390 len = resp_err_recov_pg(ap, pcontrol, target);
2393 case 0x2: /* Disconnect-Reconnect page, all devices */
2394 len = resp_disconnect_pg(ap, pcontrol, target);
2397 case 0x3: /* Format device page, direct access */
2399 len = resp_format_pg(ap, pcontrol, target);
2404 case 0x8: /* Caching page, direct access */
2405 if (is_disk || is_zbc) {
2406 len = resp_caching_pg(ap, pcontrol, target);
2411 case 0xa: /* Control Mode page, all devices */
2412 len = resp_ctrl_m_pg(ap, pcontrol, target);
2415 case 0x19: /* if spc==1 then sas phy, control+discover */
2416 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2417 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2418 return check_condition_result;
2421 if ((0x0 == subpcode) || (0xff == subpcode))
2422 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2423 if ((0x1 == subpcode) || (0xff == subpcode))
2424 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2426 if ((0x2 == subpcode) || (0xff == subpcode))
2427 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2430 case 0x1c: /* Informational Exceptions Mode page, all devices */
2431 len = resp_iec_m_pg(ap, pcontrol, target);
2434 case 0x3f: /* Read all Mode pages */
2435 if ((0 == subpcode) || (0xff == subpcode)) {
2436 len = resp_err_recov_pg(ap, pcontrol, target);
2437 len += resp_disconnect_pg(ap + len, pcontrol, target);
2439 len += resp_format_pg(ap + len, pcontrol,
2441 len += resp_caching_pg(ap + len, pcontrol,
2443 } else if (is_zbc) {
2444 len += resp_caching_pg(ap + len, pcontrol,
2447 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2448 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2449 if (0xff == subpcode) {
2450 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2451 target, target_dev_id);
2452 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2454 len += resp_iec_m_pg(ap + len, pcontrol, target);
2457 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2458 return check_condition_result;
2466 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2467 return check_condition_result;
2470 arr[0] = offset - 1;
2472 put_unaligned_be16((offset - 2), arr + 0);
2473 return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
2476 #define SDEBUG_MAX_MSELECT_SZ 512
2478 static int resp_mode_select(struct scsi_cmnd *scp,
2479 struct sdebug_dev_info *devip)
2481 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2482 int param_len, res, mpage;
2483 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2484 unsigned char *cmd = scp->cmnd;
2485 int mselect6 = (MODE_SELECT == cmd[0]);
2487 memset(arr, 0, sizeof(arr));
2490 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2491 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2492 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2493 return check_condition_result;
2495 res = fetch_to_dev_buffer(scp, arr, param_len);
2497 return DID_ERROR << 16;
2498 else if (sdebug_verbose && (res < param_len))
2499 sdev_printk(KERN_INFO, scp->device,
2500 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2501 __func__, param_len, res);
2502 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2503 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2505 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2506 return check_condition_result;
2508 off = bd_len + (mselect6 ? 4 : 8);
2509 mpage = arr[off] & 0x3f;
2510 ps = !!(arr[off] & 0x80);
2512 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2513 return check_condition_result;
2515 spf = !!(arr[off] & 0x40);
2516 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2518 if ((pg_len + off) > param_len) {
2519 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2520 PARAMETER_LIST_LENGTH_ERR, 0);
2521 return check_condition_result;
2524 case 0x8: /* Caching Mode page */
2525 if (caching_pg[1] == arr[off + 1]) {
2526 memcpy(caching_pg + 2, arr + off + 2,
2527 sizeof(caching_pg) - 2);
2528 goto set_mode_changed_ua;
2531 case 0xa: /* Control Mode page */
2532 if (ctrl_m_pg[1] == arr[off + 1]) {
2533 memcpy(ctrl_m_pg + 2, arr + off + 2,
2534 sizeof(ctrl_m_pg) - 2);
2535 if (ctrl_m_pg[4] & 0x8)
2539 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2540 goto set_mode_changed_ua;
2543 case 0x1c: /* Informational Exceptions Mode page */
2544 if (iec_m_pg[1] == arr[off + 1]) {
2545 memcpy(iec_m_pg + 2, arr + off + 2,
2546 sizeof(iec_m_pg) - 2);
2547 goto set_mode_changed_ua;
2553 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2554 return check_condition_result;
2555 set_mode_changed_ua:
2556 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2560 static int resp_temp_l_pg(unsigned char *arr)
2562 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2563 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2566 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2567 return sizeof(temp_l_pg);
2570 static int resp_ie_l_pg(unsigned char *arr)
2572 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2575 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2576 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2577 arr[4] = THRESHOLD_EXCEEDED;
2580 return sizeof(ie_l_pg);
2583 #define SDEBUG_MAX_LSENSE_SZ 512
2585 static int resp_log_sense(struct scsi_cmnd *scp,
2586 struct sdebug_dev_info *devip)
2588 int ppc, sp, pcode, subpcode, alloc_len, len, n;
2589 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2590 unsigned char *cmd = scp->cmnd;
2592 memset(arr, 0, sizeof(arr));
2596 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2597 return check_condition_result;
2599 pcode = cmd[2] & 0x3f;
2600 subpcode = cmd[3] & 0xff;
2601 alloc_len = get_unaligned_be16(cmd + 7);
2603 if (0 == subpcode) {
2605 case 0x0: /* Supported log pages log page */
2607 arr[n++] = 0x0; /* this page */
2608 arr[n++] = 0xd; /* Temperature */
2609 arr[n++] = 0x2f; /* Informational exceptions */
2612 case 0xd: /* Temperature log page */
2613 arr[3] = resp_temp_l_pg(arr + 4);
2615 case 0x2f: /* Informational exceptions log page */
2616 arr[3] = resp_ie_l_pg(arr + 4);
2619 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2620 return check_condition_result;
2622 } else if (0xff == subpcode) {
2626 case 0x0: /* Supported log pages and subpages log page */
2629 arr[n++] = 0x0; /* 0,0 page */
2631 arr[n++] = 0xff; /* this page */
2633 arr[n++] = 0x0; /* Temperature */
2635 arr[n++] = 0x0; /* Informational exceptions */
2638 case 0xd: /* Temperature subpages */
2641 arr[n++] = 0x0; /* Temperature */
2644 case 0x2f: /* Informational exceptions subpages */
2647 arr[n++] = 0x0; /* Informational exceptions */
2651 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2652 return check_condition_result;
2655 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2656 return check_condition_result;
2658 len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2659 return fill_from_dev_buffer(scp, arr,
2660 min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2663 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2665 return devip->nr_zones != 0;
2668 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2669 unsigned long long lba)
2671 return &devip->zstate[lba >> devip->zsize_shift];
2674 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2676 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2679 static void zbc_close_zone(struct sdebug_dev_info *devip,
2680 struct sdeb_zone_state *zsp)
2682 enum sdebug_z_cond zc;
2684 if (zbc_zone_is_conv(zsp))
2688 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2691 if (zc == ZC2_IMPLICIT_OPEN)
2692 devip->nr_imp_open--;
2694 devip->nr_exp_open--;
2696 if (zsp->z_wp == zsp->z_start) {
2697 zsp->z_cond = ZC1_EMPTY;
2699 zsp->z_cond = ZC4_CLOSED;
2704 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2706 struct sdeb_zone_state *zsp = &devip->zstate[0];
2709 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2710 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2711 zbc_close_zone(devip, zsp);
2717 static void zbc_open_zone(struct sdebug_dev_info *devip,
2718 struct sdeb_zone_state *zsp, bool explicit)
2720 enum sdebug_z_cond zc;
2722 if (zbc_zone_is_conv(zsp))
2726 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2727 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2730 /* Close an implicit open zone if necessary */
2731 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2732 zbc_close_zone(devip, zsp);
2733 else if (devip->max_open &&
2734 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2735 zbc_close_imp_open_zone(devip);
2737 if (zsp->z_cond == ZC4_CLOSED)
2740 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2741 devip->nr_exp_open++;
2743 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2744 devip->nr_imp_open++;
2748 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2749 unsigned long long lba, unsigned int num)
2751 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2752 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2754 if (zbc_zone_is_conv(zsp))
2757 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2759 if (zsp->z_wp >= zend)
2760 zsp->z_cond = ZC5_FULL;
2765 if (lba != zsp->z_wp)
2766 zsp->z_non_seq_resource = true;
2772 } else if (end > zsp->z_wp) {
2778 if (zsp->z_wp >= zend)
2779 zsp->z_cond = ZC5_FULL;
2785 zend = zsp->z_start + zsp->z_size;
2790 static int check_zbc_access_params(struct scsi_cmnd *scp,
2791 unsigned long long lba, unsigned int num, bool write)
2793 struct scsi_device *sdp = scp->device;
2794 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2795 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2796 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2799 if (devip->zmodel == BLK_ZONED_HA)
2801 /* For host-managed, reads cannot cross zone types boundaries */
2802 if (zsp_end != zsp &&
2803 zbc_zone_is_conv(zsp) &&
2804 !zbc_zone_is_conv(zsp_end)) {
2805 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2808 return check_condition_result;
2813 /* No restrictions for writes within conventional zones */
2814 if (zbc_zone_is_conv(zsp)) {
2815 if (!zbc_zone_is_conv(zsp_end)) {
2816 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2818 WRITE_BOUNDARY_ASCQ);
2819 return check_condition_result;
2824 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2825 /* Writes cannot cross sequential zone boundaries */
2826 if (zsp_end != zsp) {
2827 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2829 WRITE_BOUNDARY_ASCQ);
2830 return check_condition_result;
2832 /* Cannot write full zones */
2833 if (zsp->z_cond == ZC5_FULL) {
2834 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2835 INVALID_FIELD_IN_CDB, 0);
2836 return check_condition_result;
2838 /* Writes must be aligned to the zone WP */
2839 if (lba != zsp->z_wp) {
2840 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2842 UNALIGNED_WRITE_ASCQ);
2843 return check_condition_result;
2847 /* Handle implicit open of closed and empty zones */
2848 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2849 if (devip->max_open &&
2850 devip->nr_exp_open >= devip->max_open) {
2851 mk_sense_buffer(scp, DATA_PROTECT,
2854 return check_condition_result;
2856 zbc_open_zone(devip, zsp, false);
2862 static inline int check_device_access_params
2863 (struct scsi_cmnd *scp, unsigned long long lba,
2864 unsigned int num, bool write)
2866 struct scsi_device *sdp = scp->device;
2867 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2869 if (lba + num > sdebug_capacity) {
2870 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2871 return check_condition_result;
2873 /* transfer length excessive (tie in to block limits VPD page) */
2874 if (num > sdebug_store_sectors) {
2875 /* needs work to find which cdb byte 'num' comes from */
2876 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2877 return check_condition_result;
2879 if (write && unlikely(sdebug_wp)) {
2880 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2881 return check_condition_result;
2883 if (sdebug_dev_is_zoned(devip))
2884 return check_zbc_access_params(scp, lba, num, write);
2890 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2891 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2892 * that access any of the "stores" in struct sdeb_store_info should call this
2893 * function with bug_if_fake_rw set to true.
2895 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2896 bool bug_if_fake_rw)
2898 if (sdebug_fake_rw) {
2899 BUG_ON(bug_if_fake_rw); /* See note above */
2902 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2905 /* Returns number of bytes copied or -1 if error. */
2906 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2907 u32 sg_skip, u64 lba, u32 num, bool do_write)
2910 u64 block, rest = 0;
2911 enum dma_data_direction dir;
2912 struct scsi_data_buffer *sdb = &scp->sdb;
2916 dir = DMA_TO_DEVICE;
2917 write_since_sync = true;
2919 dir = DMA_FROM_DEVICE;
2922 if (!sdb->length || !sip)
2924 if (scp->sc_data_direction != dir)
2928 block = do_div(lba, sdebug_store_sectors);
2929 if (block + num > sdebug_store_sectors)
2930 rest = block + num - sdebug_store_sectors;
2932 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2933 fsp + (block * sdebug_sector_size),
2934 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2935 if (ret != (num - rest) * sdebug_sector_size)
2939 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2940 fsp, rest * sdebug_sector_size,
2941 sg_skip + ((num - rest) * sdebug_sector_size),
2948 /* Returns number of bytes copied or -1 if error. */
2949 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2951 struct scsi_data_buffer *sdb = &scp->sdb;
2955 if (scp->sc_data_direction != DMA_TO_DEVICE)
2957 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2958 num * sdebug_sector_size, 0, true);
2961 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2962 * arr into sip->storep+lba and return true. If comparison fails then
2964 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2965 const u8 *arr, bool compare_only)
2968 u64 block, rest = 0;
2969 u32 store_blks = sdebug_store_sectors;
2970 u32 lb_size = sdebug_sector_size;
2971 u8 *fsp = sip->storep;
2973 block = do_div(lba, store_blks);
2974 if (block + num > store_blks)
2975 rest = block + num - store_blks;
2977 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2981 res = memcmp(fsp, arr + ((num - rest) * lb_size),
2987 arr += num * lb_size;
2988 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2990 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2994 static __be16 dif_compute_csum(const void *buf, int len)
2999 csum = (__force __be16)ip_compute_csum(buf, len);
3001 csum = cpu_to_be16(crc_t10dif(buf, len));
3006 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3007 sector_t sector, u32 ei_lba)
3009 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3011 if (sdt->guard_tag != csum) {
3012 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3013 (unsigned long)sector,
3014 be16_to_cpu(sdt->guard_tag),
3018 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3019 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3020 pr_err("REF check failed on sector %lu\n",
3021 (unsigned long)sector);
3024 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3025 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3026 pr_err("REF check failed on sector %lu\n",
3027 (unsigned long)sector);
3033 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3034 unsigned int sectors, bool read)
3038 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3039 scp->device->hostdata, true);
3040 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3041 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3042 struct sg_mapping_iter miter;
3044 /* Bytes of protection data to copy into sgl */
3045 resid = sectors * sizeof(*dif_storep);
3047 sg_miter_start(&miter, scsi_prot_sglist(scp),
3048 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3049 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3051 while (sg_miter_next(&miter) && resid > 0) {
3052 size_t len = min_t(size_t, miter.length, resid);
3053 void *start = dif_store(sip, sector);
3056 if (dif_store_end < start + len)
3057 rest = start + len - dif_store_end;
3062 memcpy(paddr, start, len - rest);
3064 memcpy(start, paddr, len - rest);
3068 memcpy(paddr + len - rest, dif_storep, rest);
3070 memcpy(dif_storep, paddr + len - rest, rest);
3073 sector += len / sizeof(*dif_storep);
3076 sg_miter_stop(&miter);
3079 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3080 unsigned int sectors, u32 ei_lba)
3084 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3085 scp->device->hostdata, true);
3086 struct t10_pi_tuple *sdt;
3088 for (i = 0; i < sectors; i++, ei_lba++) {
3091 sector = start_sec + i;
3092 sdt = dif_store(sip, sector);
3094 if (sdt->app_tag == cpu_to_be16(0xffff))
3097 ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3105 dif_copy_prot(scp, start_sec, sectors, true);
3111 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3118 struct sdeb_store_info *sip = devip2sip(devip, true);
3119 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3120 u8 *cmd = scp->cmnd;
3125 lba = get_unaligned_be64(cmd + 2);
3126 num = get_unaligned_be32(cmd + 10);
3131 lba = get_unaligned_be32(cmd + 2);
3132 num = get_unaligned_be16(cmd + 7);
3137 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3138 (u32)(cmd[1] & 0x1f) << 16;
3139 num = (0 == cmd[4]) ? 256 : cmd[4];
3144 lba = get_unaligned_be32(cmd + 2);
3145 num = get_unaligned_be32(cmd + 6);
3148 case XDWRITEREAD_10:
3150 lba = get_unaligned_be32(cmd + 2);
3151 num = get_unaligned_be16(cmd + 7);
3154 default: /* assume READ(32) */
3155 lba = get_unaligned_be64(cmd + 12);
3156 ei_lba = get_unaligned_be32(cmd + 20);
3157 num = get_unaligned_be32(cmd + 28);
3161 if (unlikely(have_dif_prot && check_prot)) {
3162 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3164 mk_sense_invalid_opcode(scp);
3165 return check_condition_result;
3167 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3168 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3169 (cmd[1] & 0xe0) == 0)
3170 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3173 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3174 atomic_read(&sdeb_inject_pending))) {
3176 atomic_set(&sdeb_inject_pending, 0);
3179 ret = check_device_access_params(scp, lba, num, false);
3182 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3183 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3184 ((lba + num) > sdebug_medium_error_start))) {
3185 /* claim unrecoverable read error */
3186 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3187 /* set info field and valid bit for fixed descriptor */
3188 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3189 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3190 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3191 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3192 put_unaligned_be32(ret, scp->sense_buffer + 3);
3194 scsi_set_resid(scp, scsi_bufflen(scp));
3195 return check_condition_result;
3198 read_lock(macc_lckp);
3201 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3202 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3205 read_unlock(macc_lckp);
3206 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3207 return illegal_condition_result;
3211 ret = do_device_access(sip, scp, 0, lba, num, false);
3212 read_unlock(macc_lckp);
3213 if (unlikely(ret == -1))
3214 return DID_ERROR << 16;
3216 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3218 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3219 atomic_read(&sdeb_inject_pending))) {
3220 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3221 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3222 atomic_set(&sdeb_inject_pending, 0);
3223 return check_condition_result;
3224 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3225 /* Logical block guard check failed */
3226 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3227 atomic_set(&sdeb_inject_pending, 0);
3228 return illegal_condition_result;
3229 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3230 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3231 atomic_set(&sdeb_inject_pending, 0);
3232 return illegal_condition_result;
3238 static void dump_sector(unsigned char *buf, int len)
3242 pr_err(">>> Sector Dump <<<\n");
3243 for (i = 0 ; i < len ; i += 16) {
3246 for (j = 0, n = 0; j < 16; j++) {
3247 unsigned char c = buf[i+j];
3249 if (c >= 0x20 && c < 0x7e)
3250 n += scnprintf(b + n, sizeof(b) - n,
3253 n += scnprintf(b + n, sizeof(b) - n,
3256 pr_err("%04d: %s\n", i, b);
3260 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3261 unsigned int sectors, u32 ei_lba)
3264 struct t10_pi_tuple *sdt;
3266 sector_t sector = start_sec;
3269 struct sg_mapping_iter diter;
3270 struct sg_mapping_iter piter;
3272 BUG_ON(scsi_sg_count(SCpnt) == 0);
3273 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3275 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3276 scsi_prot_sg_count(SCpnt),
3277 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3278 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3279 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3281 /* For each protection page */
3282 while (sg_miter_next(&piter)) {
3284 if (WARN_ON(!sg_miter_next(&diter))) {
3289 for (ppage_offset = 0; ppage_offset < piter.length;
3290 ppage_offset += sizeof(struct t10_pi_tuple)) {
3291 /* If we're at the end of the current
3292 * data page advance to the next one
3294 if (dpage_offset >= diter.length) {
3295 if (WARN_ON(!sg_miter_next(&diter))) {
3302 sdt = piter.addr + ppage_offset;
3303 daddr = diter.addr + dpage_offset;
3305 ret = dif_verify(sdt, daddr, sector, ei_lba);
3307 dump_sector(daddr, sdebug_sector_size);
3313 dpage_offset += sdebug_sector_size;
3315 diter.consumed = dpage_offset;
3316 sg_miter_stop(&diter);
3318 sg_miter_stop(&piter);
3320 dif_copy_prot(SCpnt, start_sec, sectors, false);
3327 sg_miter_stop(&diter);
3328 sg_miter_stop(&piter);
3332 static unsigned long lba_to_map_index(sector_t lba)
3334 if (sdebug_unmap_alignment)
3335 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3336 sector_div(lba, sdebug_unmap_granularity);
3340 static sector_t map_index_to_lba(unsigned long index)
3342 sector_t lba = index * sdebug_unmap_granularity;
3344 if (sdebug_unmap_alignment)
3345 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3349 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3353 unsigned int mapped;
3354 unsigned long index;
3357 index = lba_to_map_index(lba);
3358 mapped = test_bit(index, sip->map_storep);
3361 next = find_next_zero_bit(sip->map_storep, map_size, index);
3363 next = find_next_bit(sip->map_storep, map_size, index);
3365 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3370 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3373 sector_t end = lba + len;
3376 unsigned long index = lba_to_map_index(lba);
3378 if (index < map_size)
3379 set_bit(index, sip->map_storep);
3381 lba = map_index_to_lba(index + 1);
3385 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3388 sector_t end = lba + len;
3389 u8 *fsp = sip->storep;
3392 unsigned long index = lba_to_map_index(lba);
3394 if (lba == map_index_to_lba(index) &&
3395 lba + sdebug_unmap_granularity <= end &&
3397 clear_bit(index, sip->map_storep);
3398 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3399 memset(fsp + lba * sdebug_sector_size,
3400 (sdebug_lbprz & 1) ? 0 : 0xff,
3401 sdebug_sector_size *
3402 sdebug_unmap_granularity);
3404 if (sip->dif_storep) {
3405 memset(sip->dif_storep + lba, 0xff,
3406 sizeof(*sip->dif_storep) *
3407 sdebug_unmap_granularity);
3410 lba = map_index_to_lba(index + 1);
3414 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3421 struct sdeb_store_info *sip = devip2sip(devip, true);
3422 rwlock_t *macc_lckp = &sip->macc_lck;
3423 u8 *cmd = scp->cmnd;
3428 lba = get_unaligned_be64(cmd + 2);
3429 num = get_unaligned_be32(cmd + 10);
3434 lba = get_unaligned_be32(cmd + 2);
3435 num = get_unaligned_be16(cmd + 7);
3440 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3441 (u32)(cmd[1] & 0x1f) << 16;
3442 num = (0 == cmd[4]) ? 256 : cmd[4];
3447 lba = get_unaligned_be32(cmd + 2);
3448 num = get_unaligned_be32(cmd + 6);
3451 case 0x53: /* XDWRITEREAD(10) */
3453 lba = get_unaligned_be32(cmd + 2);
3454 num = get_unaligned_be16(cmd + 7);
3457 default: /* assume WRITE(32) */
3458 lba = get_unaligned_be64(cmd + 12);
3459 ei_lba = get_unaligned_be32(cmd + 20);
3460 num = get_unaligned_be32(cmd + 28);
3464 if (unlikely(have_dif_prot && check_prot)) {
3465 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3467 mk_sense_invalid_opcode(scp);
3468 return check_condition_result;
3470 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3471 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3472 (cmd[1] & 0xe0) == 0)
3473 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3477 write_lock(macc_lckp);
3478 ret = check_device_access_params(scp, lba, num, true);
3480 write_unlock(macc_lckp);
3485 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3486 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3489 write_unlock(macc_lckp);
3490 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3491 return illegal_condition_result;
3495 ret = do_device_access(sip, scp, 0, lba, num, true);
3496 if (unlikely(scsi_debug_lbp()))
3497 map_region(sip, lba, num);
3498 /* If ZBC zone then bump its write pointer */
3499 if (sdebug_dev_is_zoned(devip))
3500 zbc_inc_wp(devip, lba, num);
3501 write_unlock(macc_lckp);
3502 if (unlikely(-1 == ret))
3503 return DID_ERROR << 16;
3504 else if (unlikely(sdebug_verbose &&
3505 (ret < (num * sdebug_sector_size))))
3506 sdev_printk(KERN_INFO, scp->device,
3507 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3508 my_name, num * sdebug_sector_size, ret);
3510 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3511 atomic_read(&sdeb_inject_pending))) {
3512 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3513 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3514 atomic_set(&sdeb_inject_pending, 0);
3515 return check_condition_result;
3516 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3517 /* Logical block guard check failed */
3518 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3519 atomic_set(&sdeb_inject_pending, 0);
3520 return illegal_condition_result;
3521 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3522 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3523 atomic_set(&sdeb_inject_pending, 0);
3524 return illegal_condition_result;
3531 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3532 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3534 static int resp_write_scat(struct scsi_cmnd *scp,
3535 struct sdebug_dev_info *devip)
3537 u8 *cmd = scp->cmnd;
3540 struct sdeb_store_info *sip = devip2sip(devip, true);
3541 rwlock_t *macc_lckp = &sip->macc_lck;
3543 u16 lbdof, num_lrd, k;
3544 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3545 u32 lb_size = sdebug_sector_size;
3550 static const u32 lrd_size = 32; /* + parameter list header size */
3552 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3554 wrprotect = (cmd[10] >> 5) & 0x7;
3555 lbdof = get_unaligned_be16(cmd + 12);
3556 num_lrd = get_unaligned_be16(cmd + 16);
3557 bt_len = get_unaligned_be32(cmd + 28);
3558 } else { /* that leaves WRITE SCATTERED(16) */
3560 wrprotect = (cmd[2] >> 5) & 0x7;
3561 lbdof = get_unaligned_be16(cmd + 4);
3562 num_lrd = get_unaligned_be16(cmd + 8);
3563 bt_len = get_unaligned_be32(cmd + 10);
3564 if (unlikely(have_dif_prot)) {
3565 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3567 mk_sense_invalid_opcode(scp);
3568 return illegal_condition_result;
3570 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3571 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3573 sdev_printk(KERN_ERR, scp->device,
3574 "Unprotected WR to DIF device\n");
3577 if ((num_lrd == 0) || (bt_len == 0))
3578 return 0; /* T10 says these do-nothings are not errors */
3581 sdev_printk(KERN_INFO, scp->device,
3582 "%s: %s: LB Data Offset field bad\n",
3584 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3585 return illegal_condition_result;
3587 lbdof_blen = lbdof * lb_size;
3588 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3590 sdev_printk(KERN_INFO, scp->device,
3591 "%s: %s: LBA range descriptors don't fit\n",
3593 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3594 return illegal_condition_result;
3596 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3598 return SCSI_MLQUEUE_HOST_BUSY;
3600 sdev_printk(KERN_INFO, scp->device,
3601 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3602 my_name, __func__, lbdof_blen);
3603 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3605 ret = DID_ERROR << 16;
3609 write_lock(macc_lckp);
3610 sg_off = lbdof_blen;
3611 /* Spec says Buffer xfer Length field in number of LBs in dout */
3613 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3614 lba = get_unaligned_be64(up + 0);
3615 num = get_unaligned_be32(up + 8);
3617 sdev_printk(KERN_INFO, scp->device,
3618 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3619 my_name, __func__, k, lba, num, sg_off);
3622 ret = check_device_access_params(scp, lba, num, true);
3624 goto err_out_unlock;
3625 num_by = num * lb_size;
3626 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3628 if ((cum_lb + num) > bt_len) {
3630 sdev_printk(KERN_INFO, scp->device,
3631 "%s: %s: sum of blocks > data provided\n",
3633 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3635 ret = illegal_condition_result;
3636 goto err_out_unlock;
3640 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3641 int prot_ret = prot_verify_write(scp, lba, num,
3645 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3647 ret = illegal_condition_result;
3648 goto err_out_unlock;
3652 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3653 /* If ZBC zone then bump its write pointer */
3654 if (sdebug_dev_is_zoned(devip))
3655 zbc_inc_wp(devip, lba, num);
3656 if (unlikely(scsi_debug_lbp()))
3657 map_region(sip, lba, num);
3658 if (unlikely(-1 == ret)) {
3659 ret = DID_ERROR << 16;
3660 goto err_out_unlock;
3661 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3662 sdev_printk(KERN_INFO, scp->device,
3663 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3664 my_name, num_by, ret);
3666 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3667 atomic_read(&sdeb_inject_pending))) {
3668 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3669 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3670 atomic_set(&sdeb_inject_pending, 0);
3671 ret = check_condition_result;
3672 goto err_out_unlock;
3673 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3674 /* Logical block guard check failed */
3675 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3676 atomic_set(&sdeb_inject_pending, 0);
3677 ret = illegal_condition_result;
3678 goto err_out_unlock;
3679 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3680 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3681 atomic_set(&sdeb_inject_pending, 0);
3682 ret = illegal_condition_result;
3683 goto err_out_unlock;
3691 write_unlock(macc_lckp);
3697 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3698 u32 ei_lba, bool unmap, bool ndob)
3700 struct scsi_device *sdp = scp->device;
3701 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3702 unsigned long long i;
3704 u32 lb_size = sdebug_sector_size;
3706 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3707 scp->device->hostdata, true);
3708 rwlock_t *macc_lckp = &sip->macc_lck;
3712 write_lock(macc_lckp);
3714 ret = check_device_access_params(scp, lba, num, true);
3716 write_unlock(macc_lckp);
3720 if (unmap && scsi_debug_lbp()) {
3721 unmap_region(sip, lba, num);
3725 block = do_div(lbaa, sdebug_store_sectors);
3726 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3728 fs1p = fsp + (block * lb_size);
3730 memset(fs1p, 0, lb_size);
3733 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3736 write_unlock(&sip->macc_lck);
3737 return DID_ERROR << 16;
3738 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3739 sdev_printk(KERN_INFO, scp->device,
3740 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3741 my_name, "write same", lb_size, ret);
3743 /* Copy first sector to remaining blocks */
3744 for (i = 1 ; i < num ; i++) {
3746 block = do_div(lbaa, sdebug_store_sectors);
3747 memmove(fsp + (block * lb_size), fs1p, lb_size);
3749 if (scsi_debug_lbp())
3750 map_region(sip, lba, num);
3751 /* If ZBC zone then bump its write pointer */
3752 if (sdebug_dev_is_zoned(devip))
3753 zbc_inc_wp(devip, lba, num);
3755 write_unlock(macc_lckp);
3760 static int resp_write_same_10(struct scsi_cmnd *scp,
3761 struct sdebug_dev_info *devip)
3763 u8 *cmd = scp->cmnd;
3770 if (sdebug_lbpws10 == 0) {
3771 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3772 return check_condition_result;
3776 lba = get_unaligned_be32(cmd + 2);
3777 num = get_unaligned_be16(cmd + 7);
3778 if (num > sdebug_write_same_length) {
3779 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3780 return check_condition_result;
3782 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3785 static int resp_write_same_16(struct scsi_cmnd *scp,
3786 struct sdebug_dev_info *devip)
3788 u8 *cmd = scp->cmnd;
3795 if (cmd[1] & 0x8) { /* UNMAP */
3796 if (sdebug_lbpws == 0) {
3797 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3798 return check_condition_result;
3802 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3804 lba = get_unaligned_be64(cmd + 2);
3805 num = get_unaligned_be32(cmd + 10);
3806 if (num > sdebug_write_same_length) {
3807 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3808 return check_condition_result;
3810 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3813 /* Note the mode field is in the same position as the (lower) service action
3814 * field. For the Report supported operation codes command, SPC-4 suggests
3815 * each mode of this command should be reported separately; for future. */
3816 static int resp_write_buffer(struct scsi_cmnd *scp,
3817 struct sdebug_dev_info *devip)
3819 u8 *cmd = scp->cmnd;
3820 struct scsi_device *sdp = scp->device;
3821 struct sdebug_dev_info *dp;
3824 mode = cmd[1] & 0x1f;
3826 case 0x4: /* download microcode (MC) and activate (ACT) */
3827 /* set UAs on this device only */
3828 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3829 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3831 case 0x5: /* download MC, save and ACT */
3832 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3834 case 0x6: /* download MC with offsets and ACT */
3835 /* set UAs on most devices (LUs) in this target */
3836 list_for_each_entry(dp,
3837 &devip->sdbg_host->dev_info_list,
3839 if (dp->target == sdp->id) {
3840 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3842 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3846 case 0x7: /* download MC with offsets, save, and ACT */
3847 /* set UA on all devices (LUs) in this target */
3848 list_for_each_entry(dp,
3849 &devip->sdbg_host->dev_info_list,
3851 if (dp->target == sdp->id)
3852 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3856 /* do nothing for this command for other mode values */
3862 static int resp_comp_write(struct scsi_cmnd *scp,
3863 struct sdebug_dev_info *devip)
3865 u8 *cmd = scp->cmnd;
3867 struct sdeb_store_info *sip = devip2sip(devip, true);
3868 rwlock_t *macc_lckp = &sip->macc_lck;
3871 u32 lb_size = sdebug_sector_size;
3876 lba = get_unaligned_be64(cmd + 2);
3877 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3879 return 0; /* degenerate case, not an error */
3880 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3882 mk_sense_invalid_opcode(scp);
3883 return check_condition_result;
3885 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3886 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3887 (cmd[1] & 0xe0) == 0)
3888 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3890 ret = check_device_access_params(scp, lba, num, false);
3894 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3896 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3898 return check_condition_result;
3901 write_lock(macc_lckp);
3903 ret = do_dout_fetch(scp, dnum, arr);
3905 retval = DID_ERROR << 16;
3907 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3908 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3909 "indicated=%u, IO sent=%d bytes\n", my_name,
3910 dnum * lb_size, ret);
3911 if (!comp_write_worker(sip, lba, num, arr, false)) {
3912 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3913 retval = check_condition_result;
3916 if (scsi_debug_lbp())
3917 map_region(sip, lba, num);
3919 write_unlock(macc_lckp);
3924 struct unmap_block_desc {
3930 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3933 struct unmap_block_desc *desc;
3934 struct sdeb_store_info *sip = devip2sip(devip, true);
3935 rwlock_t *macc_lckp = &sip->macc_lck;
3936 unsigned int i, payload_len, descriptors;
3939 if (!scsi_debug_lbp())
3940 return 0; /* fib and say its done */
3941 payload_len = get_unaligned_be16(scp->cmnd + 7);
3942 BUG_ON(scsi_bufflen(scp) != payload_len);
3944 descriptors = (payload_len - 8) / 16;
3945 if (descriptors > sdebug_unmap_max_desc) {
3946 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3947 return check_condition_result;
3950 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3952 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3954 return check_condition_result;
3957 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3959 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3960 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3962 desc = (void *)&buf[8];
3964 write_lock(macc_lckp);
3966 for (i = 0 ; i < descriptors ; i++) {
3967 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3968 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3970 ret = check_device_access_params(scp, lba, num, true);
3974 unmap_region(sip, lba, num);
3980 write_unlock(macc_lckp);
3986 #define SDEBUG_GET_LBA_STATUS_LEN 32
3988 static int resp_get_lba_status(struct scsi_cmnd *scp,
3989 struct sdebug_dev_info *devip)
3991 u8 *cmd = scp->cmnd;
3993 u32 alloc_len, mapped, num;
3995 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3997 lba = get_unaligned_be64(cmd + 2);
3998 alloc_len = get_unaligned_be32(cmd + 10);
4003 ret = check_device_access_params(scp, lba, 1, false);
4007 if (scsi_debug_lbp()) {
4008 struct sdeb_store_info *sip = devip2sip(devip, true);
4010 mapped = map_state(sip, lba, &num);
4013 /* following just in case virtual_gb changed */
4014 sdebug_capacity = get_sdebug_capacity();
4015 if (sdebug_capacity - lba <= 0xffffffff)
4016 num = sdebug_capacity - lba;
4021 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4022 put_unaligned_be32(20, arr); /* Parameter Data Length */
4023 put_unaligned_be64(lba, arr + 8); /* LBA */
4024 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4025 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4027 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4030 static int resp_sync_cache(struct scsi_cmnd *scp,
4031 struct sdebug_dev_info *devip)
4036 u8 *cmd = scp->cmnd;
4038 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4039 lba = get_unaligned_be32(cmd + 2);
4040 num_blocks = get_unaligned_be16(cmd + 7);
4041 } else { /* SYNCHRONIZE_CACHE(16) */
4042 lba = get_unaligned_be64(cmd + 2);
4043 num_blocks = get_unaligned_be32(cmd + 10);
4045 if (lba + num_blocks > sdebug_capacity) {
4046 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4047 return check_condition_result;
4049 if (!write_since_sync || (cmd[1] & 0x2))
4050 res = SDEG_RES_IMMED_MASK;
4051 else /* delay if write_since_sync and IMMED clear */
4052 write_since_sync = false;
4057 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4058 * CONDITION MET if the specified blocks will/have fitted in the cache, and
4059 * a GOOD status otherwise. Model a disk with a big cache and yield
4060 * CONDITION MET. Actually tries to bring range in main memory into the
4061 * cache associated with the CPU(s).
4063 static int resp_pre_fetch(struct scsi_cmnd *scp,
4064 struct sdebug_dev_info *devip)
4068 u64 block, rest = 0;
4070 u8 *cmd = scp->cmnd;
4071 struct sdeb_store_info *sip = devip2sip(devip, true);
4072 rwlock_t *macc_lckp = &sip->macc_lck;
4073 u8 *fsp = sip->storep;
4075 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4076 lba = get_unaligned_be32(cmd + 2);
4077 nblks = get_unaligned_be16(cmd + 7);
4078 } else { /* PRE-FETCH(16) */
4079 lba = get_unaligned_be64(cmd + 2);
4080 nblks = get_unaligned_be32(cmd + 10);
4082 if (lba + nblks > sdebug_capacity) {
4083 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4084 return check_condition_result;
4088 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4089 block = do_div(lba, sdebug_store_sectors);
4090 if (block + nblks > sdebug_store_sectors)
4091 rest = block + nblks - sdebug_store_sectors;
4093 /* Try to bring the PRE-FETCH range into CPU's cache */
4094 read_lock(macc_lckp);
4095 prefetch_range(fsp + (sdebug_sector_size * block),
4096 (nblks - rest) * sdebug_sector_size);
4098 prefetch_range(fsp, rest * sdebug_sector_size);
4099 read_unlock(macc_lckp);
4102 res = SDEG_RES_IMMED_MASK;
4103 return res | condition_met_result;
4106 #define RL_BUCKET_ELEMS 8
4108 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4109 * (W-LUN), the normal Linux scanning logic does not associate it with a
4110 * device (e.g. /dev/sg7). The following magic will make that association:
4111 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4112 * where <n> is a host number. If there are multiple targets in a host then
4113 * the above will associate a W-LUN to each target. To only get a W-LUN
4114 * for target 2, then use "echo '- 2 49409' > scan" .
4116 static int resp_report_luns(struct scsi_cmnd *scp,
4117 struct sdebug_dev_info *devip)
4119 unsigned char *cmd = scp->cmnd;
4120 unsigned int alloc_len;
4121 unsigned char select_report;
4123 struct scsi_lun *lun_p;
4124 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4125 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4126 unsigned int wlun_cnt; /* report luns W-LUN count */
4127 unsigned int tlun_cnt; /* total LUN count */
4128 unsigned int rlen; /* response length (in bytes) */
4130 unsigned int off_rsp = 0;
4131 const int sz_lun = sizeof(struct scsi_lun);
4133 clear_luns_changed_on_target(devip);
4135 select_report = cmd[2];
4136 alloc_len = get_unaligned_be32(cmd + 6);
4138 if (alloc_len < 4) {
4139 pr_err("alloc len too small %d\n", alloc_len);
4140 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4141 return check_condition_result;
4144 switch (select_report) {
4145 case 0: /* all LUNs apart from W-LUNs */
4146 lun_cnt = sdebug_max_luns;
4149 case 1: /* only W-LUNs */
4153 case 2: /* all LUNs */
4154 lun_cnt = sdebug_max_luns;
4157 case 0x10: /* only administrative LUs */
4158 case 0x11: /* see SPC-5 */
4159 case 0x12: /* only subsiduary LUs owned by referenced LU */
4161 pr_debug("select report invalid %d\n", select_report);
4162 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4163 return check_condition_result;
4166 if (sdebug_no_lun_0 && (lun_cnt > 0))
4169 tlun_cnt = lun_cnt + wlun_cnt;
4170 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4171 scsi_set_resid(scp, scsi_bufflen(scp));
4172 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4173 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4175 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4176 lun = sdebug_no_lun_0 ? 1 : 0;
4177 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4178 memset(arr, 0, sizeof(arr));
4179 lun_p = (struct scsi_lun *)&arr[0];
4181 put_unaligned_be32(rlen, &arr[0]);
4185 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4186 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4188 int_to_scsilun(lun++, lun_p);
4189 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4190 lun_p->scsi_lun[0] |= 0x40;
4192 if (j < RL_BUCKET_ELEMS)
4195 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4201 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4205 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4209 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4211 bool is_bytchk3 = false;
4214 u32 vnum, a_num, off;
4215 const u32 lb_size = sdebug_sector_size;
4218 u8 *cmd = scp->cmnd;
4219 struct sdeb_store_info *sip = devip2sip(devip, true);
4220 rwlock_t *macc_lckp = &sip->macc_lck;
4222 bytchk = (cmd[1] >> 1) & 0x3;
4224 return 0; /* always claim internal verify okay */
4225 } else if (bytchk == 2) {
4226 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4227 return check_condition_result;
4228 } else if (bytchk == 3) {
4229 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4233 lba = get_unaligned_be64(cmd + 2);
4234 vnum = get_unaligned_be32(cmd + 10);
4236 case VERIFY: /* is VERIFY(10) */
4237 lba = get_unaligned_be32(cmd + 2);
4238 vnum = get_unaligned_be16(cmd + 7);
4241 mk_sense_invalid_opcode(scp);
4242 return check_condition_result;
4244 a_num = is_bytchk3 ? 1 : vnum;
4245 /* Treat following check like one for read (i.e. no write) access */
4246 ret = check_device_access_params(scp, lba, a_num, false);
4250 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4252 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4254 return check_condition_result;
4256 /* Not changing store, so only need read access */
4257 read_lock(macc_lckp);
4259 ret = do_dout_fetch(scp, a_num, arr);
4261 ret = DID_ERROR << 16;
4263 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4264 sdev_printk(KERN_INFO, scp->device,
4265 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4266 my_name, __func__, a_num * lb_size, ret);
4269 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4270 memcpy(arr + off, arr, lb_size);
4273 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4274 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4275 ret = check_condition_result;
4279 read_unlock(macc_lckp);
4284 #define RZONES_DESC_HD 64
4286 /* Report zones depending on start LBA nad reporting options */
4287 static int resp_report_zones(struct scsi_cmnd *scp,
4288 struct sdebug_dev_info *devip)
4290 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4292 u32 alloc_len, rep_opts, rep_len;
4295 u8 *arr = NULL, *desc;
4296 u8 *cmd = scp->cmnd;
4297 struct sdeb_zone_state *zsp;
4298 struct sdeb_store_info *sip = devip2sip(devip, false);
4299 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4301 if (!sdebug_dev_is_zoned(devip)) {
4302 mk_sense_invalid_opcode(scp);
4303 return check_condition_result;
4305 zs_lba = get_unaligned_be64(cmd + 2);
4306 alloc_len = get_unaligned_be32(cmd + 10);
4307 rep_opts = cmd[14] & 0x3f;
4308 partial = cmd[14] & 0x80;
4310 if (zs_lba >= sdebug_capacity) {
4311 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4312 return check_condition_result;
4315 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4316 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4319 arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4321 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4323 return check_condition_result;
4326 read_lock(macc_lckp);
4329 for (i = 0; i < max_zones; i++) {
4330 lba = zs_lba + devip->zsize * i;
4331 if (lba > sdebug_capacity)
4333 zsp = zbc_zone(devip, lba);
4340 if (zsp->z_cond != ZC1_EMPTY)
4344 /* Implicit open zones */
4345 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4349 /* Explicit open zones */
4350 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4355 if (zsp->z_cond != ZC4_CLOSED)
4360 if (zsp->z_cond != ZC5_FULL)
4367 * Read-only, offline, reset WP recommended are
4368 * not emulated: no zones to report;
4372 /* non-seq-resource set */
4373 if (!zsp->z_non_seq_resource)
4377 /* Not write pointer (conventional) zones */
4378 if (!zbc_zone_is_conv(zsp))
4382 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4383 INVALID_FIELD_IN_CDB, 0);
4384 ret = check_condition_result;
4388 if (nrz < rep_max_zones) {
4389 /* Fill zone descriptor */
4390 desc[0] = zsp->z_type;
4391 desc[1] = zsp->z_cond << 4;
4392 if (zsp->z_non_seq_resource)
4394 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4395 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4396 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4400 if (partial && nrz >= rep_max_zones)
4407 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4408 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4410 rep_len = (unsigned long)desc - (unsigned long)arr;
4411 ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4414 read_unlock(macc_lckp);
4419 /* Logic transplanted from tcmu-runner, file_zbc.c */
4420 static void zbc_open_all(struct sdebug_dev_info *devip)
4422 struct sdeb_zone_state *zsp = &devip->zstate[0];
4425 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4426 if (zsp->z_cond == ZC4_CLOSED)
4427 zbc_open_zone(devip, &devip->zstate[i], true);
4431 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4435 enum sdebug_z_cond zc;
4436 u8 *cmd = scp->cmnd;
4437 struct sdeb_zone_state *zsp;
4438 bool all = cmd[14] & 0x01;
4439 struct sdeb_store_info *sip = devip2sip(devip, false);
4440 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4442 if (!sdebug_dev_is_zoned(devip)) {
4443 mk_sense_invalid_opcode(scp);
4444 return check_condition_result;
4447 write_lock(macc_lckp);
4450 /* Check if all closed zones can be open */
4451 if (devip->max_open &&
4452 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4453 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4455 res = check_condition_result;
4458 /* Open all closed zones */
4459 zbc_open_all(devip);
4463 /* Open the specified zone */
4464 z_id = get_unaligned_be64(cmd + 2);
4465 if (z_id >= sdebug_capacity) {
4466 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4467 res = check_condition_result;
4471 zsp = zbc_zone(devip, z_id);
4472 if (z_id != zsp->z_start) {
4473 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4474 res = check_condition_result;
4477 if (zbc_zone_is_conv(zsp)) {
4478 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4479 res = check_condition_result;
4484 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4487 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4488 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4490 res = check_condition_result;
4494 zbc_open_zone(devip, zsp, true);
4496 write_unlock(macc_lckp);
4500 static void zbc_close_all(struct sdebug_dev_info *devip)
4504 for (i = 0; i < devip->nr_zones; i++)
4505 zbc_close_zone(devip, &devip->zstate[i]);
4508 static int resp_close_zone(struct scsi_cmnd *scp,
4509 struct sdebug_dev_info *devip)
4513 u8 *cmd = scp->cmnd;
4514 struct sdeb_zone_state *zsp;
4515 bool all = cmd[14] & 0x01;
4516 struct sdeb_store_info *sip = devip2sip(devip, false);
4517 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4519 if (!sdebug_dev_is_zoned(devip)) {
4520 mk_sense_invalid_opcode(scp);
4521 return check_condition_result;
4524 write_lock(macc_lckp);
4527 zbc_close_all(devip);
4531 /* Close specified zone */
4532 z_id = get_unaligned_be64(cmd + 2);
4533 if (z_id >= sdebug_capacity) {
4534 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4535 res = check_condition_result;
4539 zsp = zbc_zone(devip, z_id);
4540 if (z_id != zsp->z_start) {
4541 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4542 res = check_condition_result;
4545 if (zbc_zone_is_conv(zsp)) {
4546 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4547 res = check_condition_result;
4551 zbc_close_zone(devip, zsp);
4553 write_unlock(macc_lckp);
4557 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4558 struct sdeb_zone_state *zsp, bool empty)
4560 enum sdebug_z_cond zc = zsp->z_cond;
4562 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4563 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4564 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4565 zbc_close_zone(devip, zsp);
4566 if (zsp->z_cond == ZC4_CLOSED)
4568 zsp->z_wp = zsp->z_start + zsp->z_size;
4569 zsp->z_cond = ZC5_FULL;
4573 static void zbc_finish_all(struct sdebug_dev_info *devip)
4577 for (i = 0; i < devip->nr_zones; i++)
4578 zbc_finish_zone(devip, &devip->zstate[i], false);
4581 static int resp_finish_zone(struct scsi_cmnd *scp,
4582 struct sdebug_dev_info *devip)
4584 struct sdeb_zone_state *zsp;
4587 u8 *cmd = scp->cmnd;
4588 bool all = cmd[14] & 0x01;
4589 struct sdeb_store_info *sip = devip2sip(devip, false);
4590 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4592 if (!sdebug_dev_is_zoned(devip)) {
4593 mk_sense_invalid_opcode(scp);
4594 return check_condition_result;
4597 write_lock(macc_lckp);
4600 zbc_finish_all(devip);
4604 /* Finish the specified zone */
4605 z_id = get_unaligned_be64(cmd + 2);
4606 if (z_id >= sdebug_capacity) {
4607 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4608 res = check_condition_result;
4612 zsp = zbc_zone(devip, z_id);
4613 if (z_id != zsp->z_start) {
4614 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4615 res = check_condition_result;
4618 if (zbc_zone_is_conv(zsp)) {
4619 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4620 res = check_condition_result;
4624 zbc_finish_zone(devip, zsp, true);
4626 write_unlock(macc_lckp);
4630 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4631 struct sdeb_zone_state *zsp)
4633 enum sdebug_z_cond zc;
4635 if (zbc_zone_is_conv(zsp))
4639 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4640 zbc_close_zone(devip, zsp);
4642 if (zsp->z_cond == ZC4_CLOSED)
4645 zsp->z_non_seq_resource = false;
4646 zsp->z_wp = zsp->z_start;
4647 zsp->z_cond = ZC1_EMPTY;
4650 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4654 for (i = 0; i < devip->nr_zones; i++)
4655 zbc_rwp_zone(devip, &devip->zstate[i]);
4658 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4660 struct sdeb_zone_state *zsp;
4663 u8 *cmd = scp->cmnd;
4664 bool all = cmd[14] & 0x01;
4665 struct sdeb_store_info *sip = devip2sip(devip, false);
4666 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4668 if (!sdebug_dev_is_zoned(devip)) {
4669 mk_sense_invalid_opcode(scp);
4670 return check_condition_result;
4673 write_lock(macc_lckp);
4680 z_id = get_unaligned_be64(cmd + 2);
4681 if (z_id >= sdebug_capacity) {
4682 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4683 res = check_condition_result;
4687 zsp = zbc_zone(devip, z_id);
4688 if (z_id != zsp->z_start) {
4689 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4690 res = check_condition_result;
4693 if (zbc_zone_is_conv(zsp)) {
4694 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4695 res = check_condition_result;
4699 zbc_rwp_zone(devip, zsp);
4701 write_unlock(macc_lckp);
4705 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4708 u32 tag = blk_mq_unique_tag(cmnd->request);
4710 hwq = blk_mq_unique_tag_to_hwq(tag);
4712 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4713 if (WARN_ON_ONCE(hwq >= submit_queues))
4716 return sdebug_q_arr + hwq;
4719 static u32 get_tag(struct scsi_cmnd *cmnd)
4721 return blk_mq_unique_tag(cmnd->request);
4724 /* Queued (deferred) command completions converge here. */
4725 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4727 bool aborted = sd_dp->aborted;
4730 unsigned long iflags;
4731 struct sdebug_queue *sqp;
4732 struct sdebug_queued_cmd *sqcp;
4733 struct scsi_cmnd *scp;
4734 struct sdebug_dev_info *devip;
4736 if (unlikely(aborted))
4737 sd_dp->aborted = false;
4738 qc_idx = sd_dp->qc_idx;
4739 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4740 if (sdebug_statistics) {
4741 atomic_inc(&sdebug_completions);
4742 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4743 atomic_inc(&sdebug_miss_cpus);
4745 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4746 pr_err("wild qc_idx=%d\n", qc_idx);
4749 spin_lock_irqsave(&sqp->qc_lock, iflags);
4750 sd_dp->defer_t = SDEB_DEFER_NONE;
4751 sqcp = &sqp->qc_arr[qc_idx];
4753 if (unlikely(scp == NULL)) {
4754 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4755 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4756 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4759 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4761 atomic_dec(&devip->num_in_q);
4763 pr_err("devip=NULL\n");
4764 if (unlikely(atomic_read(&retired_max_queue) > 0))
4767 sqcp->a_cmnd = NULL;
4768 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4769 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4770 pr_err("Unexpected completion\n");
4774 if (unlikely(retiring)) { /* user has reduced max_queue */
4777 retval = atomic_read(&retired_max_queue);
4778 if (qc_idx >= retval) {
4779 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4780 pr_err("index %d too large\n", retval);
4783 k = find_last_bit(sqp->in_use_bm, retval);
4784 if ((k < sdebug_max_queue) || (k == retval))
4785 atomic_set(&retired_max_queue, 0);
4787 atomic_set(&retired_max_queue, k + 1);
4789 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4790 if (unlikely(aborted)) {
4792 pr_info("bypassing scsi_done() due to aborted cmd\n");
4795 scp->scsi_done(scp); /* callback to mid level */
4798 /* When high resolution timer goes off this function is called. */
4799 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4801 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4803 sdebug_q_cmd_complete(sd_dp);
4804 return HRTIMER_NORESTART;
4807 /* When work queue schedules work, it calls this function. */
4808 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4810 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4812 sdebug_q_cmd_complete(sd_dp);
4815 static bool got_shared_uuid;
4816 static uuid_t shared_uuid;
4818 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4820 struct sdeb_zone_state *zsp;
4821 sector_t capacity = get_sdebug_capacity();
4822 sector_t zstart = 0;
4826 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4827 * a zone size allowing for at least 4 zones on the device. Otherwise,
4828 * use the specified zone size checking that at least 2 zones can be
4829 * created for the device.
4831 if (!sdeb_zbc_zone_size_mb) {
4832 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4833 >> ilog2(sdebug_sector_size);
4834 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4836 if (devip->zsize < 2) {
4837 pr_err("Device capacity too small\n");
4841 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4842 pr_err("Zone size is not a power of 2\n");
4845 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4846 >> ilog2(sdebug_sector_size);
4847 if (devip->zsize >= capacity) {
4848 pr_err("Zone size too large for device capacity\n");
4853 devip->zsize_shift = ilog2(devip->zsize);
4854 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4856 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4857 pr_err("Number of conventional zones too large\n");
4860 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4862 if (devip->zmodel == BLK_ZONED_HM) {
4863 /* zbc_max_open_zones can be 0, meaning "not reported" */
4864 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4865 devip->max_open = (devip->nr_zones - 1) / 2;
4867 devip->max_open = sdeb_zbc_max_open;
4870 devip->zstate = kcalloc(devip->nr_zones,
4871 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4875 for (i = 0; i < devip->nr_zones; i++) {
4876 zsp = &devip->zstate[i];
4878 zsp->z_start = zstart;
4880 if (i < devip->nr_conv_zones) {
4881 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4882 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4883 zsp->z_wp = (sector_t)-1;
4885 if (devip->zmodel == BLK_ZONED_HM)
4886 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4888 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4889 zsp->z_cond = ZC1_EMPTY;
4890 zsp->z_wp = zsp->z_start;
4893 if (zsp->z_start + devip->zsize < capacity)
4894 zsp->z_size = devip->zsize;
4896 zsp->z_size = capacity - zsp->z_start;
4898 zstart += zsp->z_size;
4904 static struct sdebug_dev_info *sdebug_device_create(
4905 struct sdebug_host_info *sdbg_host, gfp_t flags)
4907 struct sdebug_dev_info *devip;
4909 devip = kzalloc(sizeof(*devip), flags);
4911 if (sdebug_uuid_ctl == 1)
4912 uuid_gen(&devip->lu_name);
4913 else if (sdebug_uuid_ctl == 2) {
4914 if (got_shared_uuid)
4915 devip->lu_name = shared_uuid;
4917 uuid_gen(&shared_uuid);
4918 got_shared_uuid = true;
4919 devip->lu_name = shared_uuid;
4922 devip->sdbg_host = sdbg_host;
4923 if (sdeb_zbc_in_use) {
4924 devip->zmodel = sdeb_zbc_model;
4925 if (sdebug_device_create_zones(devip)) {
4930 devip->zmodel = BLK_ZONED_NONE;
4932 devip->sdbg_host = sdbg_host;
4933 devip->create_ts = ktime_get_boottime();
4934 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4935 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4940 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4942 struct sdebug_host_info *sdbg_host;
4943 struct sdebug_dev_info *open_devip = NULL;
4944 struct sdebug_dev_info *devip;
4946 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4948 pr_err("Host info NULL\n");
4952 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4953 if ((devip->used) && (devip->channel == sdev->channel) &&
4954 (devip->target == sdev->id) &&
4955 (devip->lun == sdev->lun))
4958 if ((!devip->used) && (!open_devip))
4962 if (!open_devip) { /* try and make a new one */
4963 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4965 pr_err("out of memory at line %d\n", __LINE__);
4970 open_devip->channel = sdev->channel;
4971 open_devip->target = sdev->id;
4972 open_devip->lun = sdev->lun;
4973 open_devip->sdbg_host = sdbg_host;
4974 atomic_set(&open_devip->num_in_q, 0);
4975 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4976 open_devip->used = true;
4980 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4983 pr_info("slave_alloc <%u %u %u %llu>\n",
4984 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4988 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4990 struct sdebug_dev_info *devip =
4991 (struct sdebug_dev_info *)sdp->hostdata;
4994 pr_info("slave_configure <%u %u %u %llu>\n",
4995 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4996 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4997 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4998 if (devip == NULL) {
4999 devip = find_build_dev_info(sdp);
5001 return 1; /* no resources, will be marked offline */
5003 sdp->hostdata = devip;
5005 sdp->no_uld_attach = 1;
5006 config_cdb_len(sdp);
5010 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5012 struct sdebug_dev_info *devip =
5013 (struct sdebug_dev_info *)sdp->hostdata;
5016 pr_info("slave_destroy <%u %u %u %llu>\n",
5017 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5019 /* make this slot available for re-use */
5020 devip->used = false;
5021 sdp->hostdata = NULL;
5025 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5026 enum sdeb_defer_type defer_t)
5030 if (defer_t == SDEB_DEFER_HRT)
5031 hrtimer_cancel(&sd_dp->hrt);
5032 else if (defer_t == SDEB_DEFER_WQ)
5033 cancel_work_sync(&sd_dp->ew.work);
5036 /* If @cmnd found deletes its timer or work queue and returns true; else
5038 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5040 unsigned long iflags;
5041 int j, k, qmax, r_qmax;
5042 enum sdeb_defer_type l_defer_t;
5043 struct sdebug_queue *sqp;
5044 struct sdebug_queued_cmd *sqcp;
5045 struct sdebug_dev_info *devip;
5046 struct sdebug_defer *sd_dp;
5048 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5049 spin_lock_irqsave(&sqp->qc_lock, iflags);
5050 qmax = sdebug_max_queue;
5051 r_qmax = atomic_read(&retired_max_queue);
5054 for (k = 0; k < qmax; ++k) {
5055 if (test_bit(k, sqp->in_use_bm)) {
5056 sqcp = &sqp->qc_arr[k];
5057 if (cmnd != sqcp->a_cmnd)
5060 devip = (struct sdebug_dev_info *)
5061 cmnd->device->hostdata;
5063 atomic_dec(&devip->num_in_q);
5064 sqcp->a_cmnd = NULL;
5065 sd_dp = sqcp->sd_dp;
5067 l_defer_t = sd_dp->defer_t;
5068 sd_dp->defer_t = SDEB_DEFER_NONE;
5070 l_defer_t = SDEB_DEFER_NONE;
5071 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5072 stop_qc_helper(sd_dp, l_defer_t);
5073 clear_bit(k, sqp->in_use_bm);
5077 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5082 /* Deletes (stops) timers or work queues of all queued commands */
5083 static void stop_all_queued(void)
5085 unsigned long iflags;
5087 enum sdeb_defer_type l_defer_t;
5088 struct sdebug_queue *sqp;
5089 struct sdebug_queued_cmd *sqcp;
5090 struct sdebug_dev_info *devip;
5091 struct sdebug_defer *sd_dp;
5093 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5094 spin_lock_irqsave(&sqp->qc_lock, iflags);
5095 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5096 if (test_bit(k, sqp->in_use_bm)) {
5097 sqcp = &sqp->qc_arr[k];
5098 if (sqcp->a_cmnd == NULL)
5100 devip = (struct sdebug_dev_info *)
5101 sqcp->a_cmnd->device->hostdata;
5103 atomic_dec(&devip->num_in_q);
5104 sqcp->a_cmnd = NULL;
5105 sd_dp = sqcp->sd_dp;
5107 l_defer_t = sd_dp->defer_t;
5108 sd_dp->defer_t = SDEB_DEFER_NONE;
5110 l_defer_t = SDEB_DEFER_NONE;
5111 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5112 stop_qc_helper(sd_dp, l_defer_t);
5113 clear_bit(k, sqp->in_use_bm);
5114 spin_lock_irqsave(&sqp->qc_lock, iflags);
5117 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5121 /* Free queued command memory on heap */
5122 static void free_all_queued(void)
5125 struct sdebug_queue *sqp;
5126 struct sdebug_queued_cmd *sqcp;
5128 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5129 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5130 sqcp = &sqp->qc_arr[k];
5137 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5143 ok = stop_queued_cmnd(SCpnt);
5144 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5145 sdev_printk(KERN_INFO, SCpnt->device,
5146 "%s: command%s found\n", __func__,
5152 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5155 if (SCpnt && SCpnt->device) {
5156 struct scsi_device *sdp = SCpnt->device;
5157 struct sdebug_dev_info *devip =
5158 (struct sdebug_dev_info *)sdp->hostdata;
5160 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5161 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5163 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5168 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5170 struct sdebug_host_info *sdbg_host;
5171 struct sdebug_dev_info *devip;
5172 struct scsi_device *sdp;
5173 struct Scsi_Host *hp;
5176 ++num_target_resets;
5179 sdp = SCpnt->device;
5182 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5183 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5187 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5189 list_for_each_entry(devip,
5190 &sdbg_host->dev_info_list,
5192 if (devip->target == sdp->id) {
5193 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5197 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5198 sdev_printk(KERN_INFO, sdp,
5199 "%s: %d device(s) found in target\n", __func__, k);
5204 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5206 struct sdebug_host_info *sdbg_host;
5207 struct sdebug_dev_info *devip;
5208 struct scsi_device *sdp;
5209 struct Scsi_Host *hp;
5213 if (!(SCpnt && SCpnt->device))
5215 sdp = SCpnt->device;
5216 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5217 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5220 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5222 list_for_each_entry(devip,
5223 &sdbg_host->dev_info_list,
5225 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5230 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5231 sdev_printk(KERN_INFO, sdp,
5232 "%s: %d device(s) found in host\n", __func__, k);
5237 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5239 struct sdebug_host_info *sdbg_host;
5240 struct sdebug_dev_info *devip;
5244 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5245 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5246 spin_lock(&sdebug_host_list_lock);
5247 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5248 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5250 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5254 spin_unlock(&sdebug_host_list_lock);
5256 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5257 sdev_printk(KERN_INFO, SCpnt->device,
5258 "%s: %d device(s) found\n", __func__, k);
5262 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5264 struct msdos_partition *pp;
5265 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5266 int sectors_per_part, num_sectors, k;
5267 int heads_by_sects, start_sec, end_sec;
5269 /* assume partition table already zeroed */
5270 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5272 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5273 sdebug_num_parts = SDEBUG_MAX_PARTS;
5274 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5276 num_sectors = (int)get_sdebug_capacity();
5277 sectors_per_part = (num_sectors - sdebug_sectors_per)
5279 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5280 starts[0] = sdebug_sectors_per;
5281 max_part_secs = sectors_per_part;
5282 for (k = 1; k < sdebug_num_parts; ++k) {
5283 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5285 if (starts[k] - starts[k - 1] < max_part_secs)
5286 max_part_secs = starts[k] - starts[k - 1];
5288 starts[sdebug_num_parts] = num_sectors;
5289 starts[sdebug_num_parts + 1] = 0;
5291 ramp[510] = 0x55; /* magic partition markings */
5293 pp = (struct msdos_partition *)(ramp + 0x1be);
5294 for (k = 0; starts[k + 1]; ++k, ++pp) {
5295 start_sec = starts[k];
5296 end_sec = starts[k] + max_part_secs - 1;
5299 pp->cyl = start_sec / heads_by_sects;
5300 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5301 / sdebug_sectors_per;
5302 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5304 pp->end_cyl = end_sec / heads_by_sects;
5305 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5306 / sdebug_sectors_per;
5307 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5309 pp->start_sect = cpu_to_le32(start_sec);
5310 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5311 pp->sys_ind = 0x83; /* plain Linux partition */
5315 static void block_unblock_all_queues(bool block)
5318 struct sdebug_queue *sqp;
5320 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5321 atomic_set(&sqp->blocked, (int)block);
5324 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5325 * commands will be processed normally before triggers occur.
5327 static void tweak_cmnd_count(void)
5331 modulo = abs(sdebug_every_nth);
5334 block_unblock_all_queues(true);
5335 count = atomic_read(&sdebug_cmnd_count);
5336 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5337 block_unblock_all_queues(false);
5340 static void clear_queue_stats(void)
5342 atomic_set(&sdebug_cmnd_count, 0);
5343 atomic_set(&sdebug_completions, 0);
5344 atomic_set(&sdebug_miss_cpus, 0);
5345 atomic_set(&sdebug_a_tsf, 0);
5348 static bool inject_on_this_cmd(void)
5350 if (sdebug_every_nth == 0)
5352 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5355 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5357 /* Complete the processing of the thread that queued a SCSI command to this
5358 * driver. It either completes the command by calling cmnd_done() or
5359 * schedules a hr timer or work queue then returns 0. Returns
5360 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5362 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5364 int (*pfp)(struct scsi_cmnd *,
5365 struct sdebug_dev_info *),
5366 int delta_jiff, int ndelay)
5369 bool inject = false;
5370 bool hipri = (cmnd->request->cmd_flags & REQ_HIPRI);
5371 int k, num_in_q, qdepth;
5372 unsigned long iflags;
5373 u64 ns_from_boot = 0;
5374 struct sdebug_queue *sqp;
5375 struct sdebug_queued_cmd *sqcp;
5376 struct scsi_device *sdp;
5377 struct sdebug_defer *sd_dp;
5379 if (unlikely(devip == NULL)) {
5380 if (scsi_result == 0)
5381 scsi_result = DID_NO_CONNECT << 16;
5382 goto respond_in_thread;
5386 if (delta_jiff == 0)
5387 goto respond_in_thread;
5389 sqp = get_queue(cmnd);
5390 spin_lock_irqsave(&sqp->qc_lock, iflags);
5391 if (unlikely(atomic_read(&sqp->blocked))) {
5392 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5393 return SCSI_MLQUEUE_HOST_BUSY;
5395 num_in_q = atomic_read(&devip->num_in_q);
5396 qdepth = cmnd->device->queue_depth;
5397 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5399 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5400 goto respond_in_thread;
5402 scsi_result = device_qfull_result;
5403 } else if (unlikely(sdebug_every_nth &&
5404 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5405 (scsi_result == 0))) {
5406 if ((num_in_q == (qdepth - 1)) &&
5407 (atomic_inc_return(&sdebug_a_tsf) >=
5408 abs(sdebug_every_nth))) {
5409 atomic_set(&sdebug_a_tsf, 0);
5411 scsi_result = device_qfull_result;
5415 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5416 if (unlikely(k >= sdebug_max_queue)) {
5417 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5419 goto respond_in_thread;
5420 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5421 scsi_result = device_qfull_result;
5422 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5423 sdev_printk(KERN_INFO, sdp,
5424 "%s: max_queue=%d exceeded, %s\n",
5425 __func__, sdebug_max_queue,
5426 (scsi_result ? "status: TASK SET FULL" :
5427 "report: host busy"));
5429 goto respond_in_thread;
5431 return SCSI_MLQUEUE_HOST_BUSY;
5433 set_bit(k, sqp->in_use_bm);
5434 atomic_inc(&devip->num_in_q);
5435 sqcp = &sqp->qc_arr[k];
5436 sqcp->a_cmnd = cmnd;
5437 cmnd->host_scribble = (unsigned char *)sqcp;
5438 sd_dp = sqcp->sd_dp;
5439 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5442 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5444 atomic_dec(&devip->num_in_q);
5445 clear_bit(k, sqp->in_use_bm);
5446 return SCSI_MLQUEUE_HOST_BUSY;
5453 /* Set the hostwide tag */
5454 if (sdebug_host_max_queue)
5455 sd_dp->hc_idx = get_tag(cmnd);
5458 ns_from_boot = ktime_get_boottime_ns();
5460 /* one of the resp_*() response functions is called here */
5461 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5462 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5463 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5464 delta_jiff = ndelay = 0;
5466 if (cmnd->result == 0 && scsi_result != 0)
5467 cmnd->result = scsi_result;
5468 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5469 if (atomic_read(&sdeb_inject_pending)) {
5470 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5471 atomic_set(&sdeb_inject_pending, 0);
5472 cmnd->result = check_condition_result;
5476 if (unlikely(sdebug_verbose && cmnd->result))
5477 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5478 __func__, cmnd->result);
5480 if (delta_jiff > 0 || ndelay > 0) {
5483 if (delta_jiff > 0) {
5484 u64 ns = jiffies_to_nsecs(delta_jiff);
5486 if (sdebug_random && ns < U32_MAX) {
5487 ns = prandom_u32_max((u32)ns);
5488 } else if (sdebug_random) {
5489 ns >>= 12; /* scale to 4 usec precision */
5490 if (ns < U32_MAX) /* over 4 hours max */
5491 ns = prandom_u32_max((u32)ns);
5494 kt = ns_to_ktime(ns);
5495 } else { /* ndelay has a 4.2 second max */
5496 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5498 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5499 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5501 if (kt <= d) { /* elapsed duration >= kt */
5502 spin_lock_irqsave(&sqp->qc_lock, iflags);
5503 sqcp->a_cmnd = NULL;
5504 atomic_dec(&devip->num_in_q);
5505 clear_bit(k, sqp->in_use_bm);
5506 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5509 /* call scsi_done() from this thread */
5510 cmnd->scsi_done(cmnd);
5513 /* otherwise reduce kt by elapsed time */
5518 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5519 spin_lock_irqsave(&sqp->qc_lock, iflags);
5520 if (!sd_dp->init_poll) {
5521 sd_dp->init_poll = true;
5522 sqcp->sd_dp = sd_dp;
5523 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5526 sd_dp->defer_t = SDEB_DEFER_POLL;
5527 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5529 if (!sd_dp->init_hrt) {
5530 sd_dp->init_hrt = true;
5531 sqcp->sd_dp = sd_dp;
5532 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5533 HRTIMER_MODE_REL_PINNED);
5534 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5535 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5538 sd_dp->defer_t = SDEB_DEFER_HRT;
5539 /* schedule the invocation of scsi_done() for a later time */
5540 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5542 if (sdebug_statistics)
5543 sd_dp->issuing_cpu = raw_smp_processor_id();
5544 } else { /* jdelay < 0, use work queue */
5545 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5546 atomic_read(&sdeb_inject_pending)))
5547 sd_dp->aborted = true;
5549 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5550 spin_lock_irqsave(&sqp->qc_lock, iflags);
5551 if (!sd_dp->init_poll) {
5552 sd_dp->init_poll = true;
5553 sqcp->sd_dp = sd_dp;
5554 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5557 sd_dp->defer_t = SDEB_DEFER_POLL;
5558 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5560 if (!sd_dp->init_wq) {
5561 sd_dp->init_wq = true;
5562 sqcp->sd_dp = sd_dp;
5563 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5565 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5567 sd_dp->defer_t = SDEB_DEFER_WQ;
5568 schedule_work(&sd_dp->ew.work);
5570 if (sdebug_statistics)
5571 sd_dp->issuing_cpu = raw_smp_processor_id();
5572 if (unlikely(sd_dp->aborted)) {
5573 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
5574 blk_abort_request(cmnd->request);
5575 atomic_set(&sdeb_inject_pending, 0);
5576 sd_dp->aborted = false;
5579 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5580 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5581 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5584 respond_in_thread: /* call back to mid-layer using invocation thread */
5585 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5586 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5587 if (cmnd->result == 0 && scsi_result != 0)
5588 cmnd->result = scsi_result;
5589 cmnd->scsi_done(cmnd);
5593 /* Note: The following macros create attribute files in the
5594 /sys/module/scsi_debug/parameters directory. Unfortunately this
5595 driver is unaware of a change and cannot trigger auxiliary actions
5596 as it can when the corresponding attribute in the
5597 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5599 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5600 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5601 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5602 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5603 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5604 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5605 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5606 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5607 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5608 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5609 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5610 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5611 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5612 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5613 module_param_string(inq_product, sdebug_inq_product_id,
5614 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5615 module_param_string(inq_rev, sdebug_inq_product_rev,
5616 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5617 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5618 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5619 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5620 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5621 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5622 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5623 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5624 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5625 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5626 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5627 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5629 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5631 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5632 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5633 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5634 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5635 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5636 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5637 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5638 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5639 module_param_named(per_host_store, sdebug_per_host_store, bool,
5641 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5642 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5643 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5644 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5645 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5646 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5647 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5648 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5649 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5650 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5651 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5652 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5653 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5654 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5655 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5656 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5657 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5658 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5660 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5661 module_param_named(write_same_length, sdebug_write_same_length, int,
5663 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5664 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5665 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5666 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5668 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5669 MODULE_DESCRIPTION("SCSI debug adapter driver");
5670 MODULE_LICENSE("GPL");
5671 MODULE_VERSION(SDEBUG_VERSION);
5673 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5674 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5675 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5676 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5677 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5678 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5679 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5680 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5681 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5682 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5683 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5684 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5685 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5686 MODULE_PARM_DESC(host_max_queue,
5687 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5688 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5689 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5690 SDEBUG_VERSION "\")");
5691 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5692 MODULE_PARM_DESC(lbprz,
5693 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5694 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5695 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5696 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5697 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5698 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5699 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5700 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5701 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5702 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5703 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5704 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5705 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5706 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5707 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5708 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5709 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5710 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5711 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5712 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5713 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5714 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5715 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5716 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5717 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5718 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5719 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5720 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5721 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5722 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5723 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5724 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5725 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5726 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5727 MODULE_PARM_DESC(uuid_ctl,
5728 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5729 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5730 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5731 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5732 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5733 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5734 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5735 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5736 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5738 #define SDEBUG_INFO_LEN 256
5739 static char sdebug_info[SDEBUG_INFO_LEN];
5741 static const char *scsi_debug_info(struct Scsi_Host *shp)
5745 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5746 my_name, SDEBUG_VERSION, sdebug_version_date);
5747 if (k >= (SDEBUG_INFO_LEN - 1))
5749 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5750 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5751 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5752 "statistics", (int)sdebug_statistics);
5756 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5757 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5762 int minLen = length > 15 ? 15 : length;
5764 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5766 memcpy(arr, buffer, minLen);
5768 if (1 != sscanf(arr, "%d", &opts))
5771 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5772 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5773 if (sdebug_every_nth != 0)
5778 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5779 * same for each scsi_debug host (if more than one). Some of the counters
5780 * output are not atomics so might be inaccurate in a busy system. */
5781 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5784 struct sdebug_queue *sqp;
5785 struct sdebug_host_info *sdhp;
5787 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5788 SDEBUG_VERSION, sdebug_version_date);
5789 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5790 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5791 sdebug_opts, sdebug_every_nth);
5792 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5793 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5794 sdebug_sector_size, "bytes");
5795 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5796 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5798 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5799 num_dev_resets, num_target_resets, num_bus_resets,
5801 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5802 dix_reads, dix_writes, dif_errors);
5803 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5805 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5806 atomic_read(&sdebug_cmnd_count),
5807 atomic_read(&sdebug_completions),
5808 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5809 atomic_read(&sdebug_a_tsf),
5810 atomic_read(&sdeb_mq_poll_count));
5812 seq_printf(m, "submit_queues=%d\n", submit_queues);
5813 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5814 seq_printf(m, " queue %d:\n", j);
5815 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5816 if (f != sdebug_max_queue) {
5817 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5818 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5819 "first,last bits", f, l);
5823 seq_printf(m, "this host_no=%d\n", host->host_no);
5824 if (!xa_empty(per_store_ap)) {
5827 unsigned long l_idx;
5828 struct sdeb_store_info *sip;
5830 seq_puts(m, "\nhost list:\n");
5832 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5834 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5835 sdhp->shost->host_no, idx);
5838 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5839 sdeb_most_recent_idx);
5841 xa_for_each(per_store_ap, l_idx, sip) {
5842 niu = xa_get_mark(per_store_ap, l_idx,
5843 SDEB_XA_NOT_IN_USE);
5845 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5846 (niu ? " not_in_use" : ""));
5853 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5855 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5857 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5858 * of delay is jiffies.
5860 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5865 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5867 if (sdebug_jdelay != jdelay) {
5869 struct sdebug_queue *sqp;
5871 block_unblock_all_queues(true);
5872 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5874 k = find_first_bit(sqp->in_use_bm,
5876 if (k != sdebug_max_queue) {
5877 res = -EBUSY; /* queued commands */
5882 sdebug_jdelay = jdelay;
5885 block_unblock_all_queues(false);
5891 static DRIVER_ATTR_RW(delay);
5893 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5895 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5897 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5898 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5899 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5904 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5905 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5907 if (sdebug_ndelay != ndelay) {
5909 struct sdebug_queue *sqp;
5911 block_unblock_all_queues(true);
5912 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5914 k = find_first_bit(sqp->in_use_bm,
5916 if (k != sdebug_max_queue) {
5917 res = -EBUSY; /* queued commands */
5922 sdebug_ndelay = ndelay;
5923 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5926 block_unblock_all_queues(false);
5932 static DRIVER_ATTR_RW(ndelay);
5934 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5936 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5939 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5945 if (sscanf(buf, "%10s", work) == 1) {
5946 if (strncasecmp(work, "0x", 2) == 0) {
5947 if (kstrtoint(work + 2, 16, &opts) == 0)
5950 if (kstrtoint(work, 10, &opts) == 0)
5957 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5958 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5962 static DRIVER_ATTR_RW(opts);
5964 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5966 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5968 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5973 /* Cannot change from or to TYPE_ZBC with sysfs */
5974 if (sdebug_ptype == TYPE_ZBC)
5977 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5985 static DRIVER_ATTR_RW(ptype);
5987 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5989 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5991 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5996 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6002 static DRIVER_ATTR_RW(dsense);
6004 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6006 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6008 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6013 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6014 bool want_store = (n == 0);
6015 struct sdebug_host_info *sdhp;
6018 sdebug_fake_rw = (sdebug_fake_rw > 0);
6019 if (sdebug_fake_rw == n)
6020 return count; /* not transitioning so do nothing */
6022 if (want_store) { /* 1 --> 0 transition, set up store */
6023 if (sdeb_first_idx < 0) {
6024 idx = sdebug_add_store();
6028 idx = sdeb_first_idx;
6029 xa_clear_mark(per_store_ap, idx,
6030 SDEB_XA_NOT_IN_USE);
6032 /* make all hosts use same store */
6033 list_for_each_entry(sdhp, &sdebug_host_list,
6035 if (sdhp->si_idx != idx) {
6036 xa_set_mark(per_store_ap, sdhp->si_idx,
6037 SDEB_XA_NOT_IN_USE);
6041 sdeb_most_recent_idx = idx;
6042 } else { /* 0 --> 1 transition is trigger for shrink */
6043 sdebug_erase_all_stores(true /* apart from first */);
6050 static DRIVER_ATTR_RW(fake_rw);
6052 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6054 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6056 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6061 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6062 sdebug_no_lun_0 = n;
6067 static DRIVER_ATTR_RW(no_lun_0);
6069 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6071 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6073 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6078 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6079 sdebug_num_tgts = n;
6080 sdebug_max_tgts_luns();
6085 static DRIVER_ATTR_RW(num_tgts);
6087 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6089 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6091 static DRIVER_ATTR_RO(dev_size_mb);
6093 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6095 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6098 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6103 if (kstrtobool(buf, &v))
6106 sdebug_per_host_store = v;
6109 static DRIVER_ATTR_RW(per_host_store);
6111 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6113 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6115 static DRIVER_ATTR_RO(num_parts);
6117 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6119 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6121 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6127 if (sscanf(buf, "%10s", work) == 1) {
6128 if (strncasecmp(work, "0x", 2) == 0) {
6129 if (kstrtoint(work + 2, 16, &nth) == 0)
6130 goto every_nth_done;
6132 if (kstrtoint(work, 10, &nth) == 0)
6133 goto every_nth_done;
6139 sdebug_every_nth = nth;
6140 if (nth && !sdebug_statistics) {
6141 pr_info("every_nth needs statistics=1, set it\n");
6142 sdebug_statistics = true;
6147 static DRIVER_ATTR_RW(every_nth);
6149 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6151 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6153 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6159 if (kstrtoint(buf, 0, &n))
6162 if (n > (int)SAM_LUN_AM_FLAT) {
6163 pr_warn("only LUN address methods 0 and 1 are supported\n");
6166 changed = ((int)sdebug_lun_am != n);
6168 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6169 struct sdebug_host_info *sdhp;
6170 struct sdebug_dev_info *dp;
6172 spin_lock(&sdebug_host_list_lock);
6173 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6174 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6175 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6178 spin_unlock(&sdebug_host_list_lock);
6184 static DRIVER_ATTR_RW(lun_format);
6186 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6188 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6190 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6196 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6198 pr_warn("max_luns can be no more than 256\n");
6201 changed = (sdebug_max_luns != n);
6202 sdebug_max_luns = n;
6203 sdebug_max_tgts_luns();
6204 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6205 struct sdebug_host_info *sdhp;
6206 struct sdebug_dev_info *dp;
6208 spin_lock(&sdebug_host_list_lock);
6209 list_for_each_entry(sdhp, &sdebug_host_list,
6211 list_for_each_entry(dp, &sdhp->dev_info_list,
6213 set_bit(SDEBUG_UA_LUNS_CHANGED,
6217 spin_unlock(&sdebug_host_list_lock);
6223 static DRIVER_ATTR_RW(max_luns);
6225 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6227 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6229 /* N.B. max_queue can be changed while there are queued commands. In flight
6230 * commands beyond the new max_queue will be completed. */
6231 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6235 struct sdebug_queue *sqp;
6237 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6238 (n <= SDEBUG_CANQUEUE) &&
6239 (sdebug_host_max_queue == 0)) {
6240 block_unblock_all_queues(true);
6242 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6244 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6248 sdebug_max_queue = n;
6249 if (k == SDEBUG_CANQUEUE)
6250 atomic_set(&retired_max_queue, 0);
6252 atomic_set(&retired_max_queue, k + 1);
6254 atomic_set(&retired_max_queue, 0);
6255 block_unblock_all_queues(false);
6260 static DRIVER_ATTR_RW(max_queue);
6262 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6264 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6268 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6269 * in range [0, sdebug_host_max_queue), we can't change it.
6271 static DRIVER_ATTR_RO(host_max_queue);
6273 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6275 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6277 static DRIVER_ATTR_RO(no_uld);
6279 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6281 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6283 static DRIVER_ATTR_RO(scsi_level);
6285 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6287 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6289 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6295 /* Ignore capacity change for ZBC drives for now */
6296 if (sdeb_zbc_in_use)
6299 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6300 changed = (sdebug_virtual_gb != n);
6301 sdebug_virtual_gb = n;
6302 sdebug_capacity = get_sdebug_capacity();
6304 struct sdebug_host_info *sdhp;
6305 struct sdebug_dev_info *dp;
6307 spin_lock(&sdebug_host_list_lock);
6308 list_for_each_entry(sdhp, &sdebug_host_list,
6310 list_for_each_entry(dp, &sdhp->dev_info_list,
6312 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6316 spin_unlock(&sdebug_host_list_lock);
6322 static DRIVER_ATTR_RW(virtual_gb);
6324 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6326 /* absolute number of hosts currently active is what is shown */
6327 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6330 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6335 struct sdeb_store_info *sip;
6336 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6339 if (sscanf(buf, "%d", &delta_hosts) != 1)
6341 if (delta_hosts > 0) {
6345 xa_for_each_marked(per_store_ap, idx, sip,
6346 SDEB_XA_NOT_IN_USE) {
6347 sdeb_most_recent_idx = (int)idx;
6351 if (found) /* re-use case */
6352 sdebug_add_host_helper((int)idx);
6354 sdebug_do_add_host(true);
6356 sdebug_do_add_host(false);
6358 } while (--delta_hosts);
6359 } else if (delta_hosts < 0) {
6361 sdebug_do_remove_host(false);
6362 } while (++delta_hosts);
6366 static DRIVER_ATTR_RW(add_host);
6368 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6370 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6372 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6377 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6378 sdebug_vpd_use_hostno = n;
6383 static DRIVER_ATTR_RW(vpd_use_hostno);
6385 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6387 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6389 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6394 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6396 sdebug_statistics = true;
6398 clear_queue_stats();
6399 sdebug_statistics = false;
6405 static DRIVER_ATTR_RW(statistics);
6407 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6409 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6411 static DRIVER_ATTR_RO(sector_size);
6413 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6415 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6417 static DRIVER_ATTR_RO(submit_queues);
6419 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6421 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6423 static DRIVER_ATTR_RO(dix);
6425 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6427 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6429 static DRIVER_ATTR_RO(dif);
6431 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6433 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6435 static DRIVER_ATTR_RO(guard);
6437 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6439 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6441 static DRIVER_ATTR_RO(ato);
6443 static ssize_t map_show(struct device_driver *ddp, char *buf)
6447 if (!scsi_debug_lbp())
6448 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6449 sdebug_store_sectors);
6451 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6452 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6455 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6456 (int)map_size, sip->map_storep);
6458 buf[count++] = '\n';
6463 static DRIVER_ATTR_RO(map);
6465 static ssize_t random_show(struct device_driver *ddp, char *buf)
6467 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6470 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6475 if (kstrtobool(buf, &v))
6481 static DRIVER_ATTR_RW(random);
6483 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6485 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6487 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6492 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6493 sdebug_removable = (n > 0);
6498 static DRIVER_ATTR_RW(removable);
6500 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6502 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6504 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6505 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6510 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6511 sdebug_host_lock = (n > 0);
6516 static DRIVER_ATTR_RW(host_lock);
6518 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6520 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6522 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6527 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6528 sdebug_strict = (n > 0);
6533 static DRIVER_ATTR_RW(strict);
6535 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6537 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6539 static DRIVER_ATTR_RO(uuid_ctl);
6541 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6543 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6545 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6550 ret = kstrtoint(buf, 0, &n);
6554 all_config_cdb_len();
6557 static DRIVER_ATTR_RW(cdb_len);
6559 static const char * const zbc_model_strs_a[] = {
6560 [BLK_ZONED_NONE] = "none",
6561 [BLK_ZONED_HA] = "host-aware",
6562 [BLK_ZONED_HM] = "host-managed",
6565 static const char * const zbc_model_strs_b[] = {
6566 [BLK_ZONED_NONE] = "no",
6567 [BLK_ZONED_HA] = "aware",
6568 [BLK_ZONED_HM] = "managed",
6571 static const char * const zbc_model_strs_c[] = {
6572 [BLK_ZONED_NONE] = "0",
6573 [BLK_ZONED_HA] = "1",
6574 [BLK_ZONED_HM] = "2",
6577 static int sdeb_zbc_model_str(const char *cp)
6579 int res = sysfs_match_string(zbc_model_strs_a, cp);
6582 res = sysfs_match_string(zbc_model_strs_b, cp);
6584 res = sysfs_match_string(zbc_model_strs_c, cp);
6592 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6594 return scnprintf(buf, PAGE_SIZE, "%s\n",
6595 zbc_model_strs_a[sdeb_zbc_model]);
6597 static DRIVER_ATTR_RO(zbc);
6599 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6601 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6603 static DRIVER_ATTR_RO(tur_ms_to_ready);
6605 /* Note: The following array creates attribute files in the
6606 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6607 files (over those found in the /sys/module/scsi_debug/parameters
6608 directory) is that auxiliary actions can be triggered when an attribute
6609 is changed. For example see: add_host_store() above.
6612 static struct attribute *sdebug_drv_attrs[] = {
6613 &driver_attr_delay.attr,
6614 &driver_attr_opts.attr,
6615 &driver_attr_ptype.attr,
6616 &driver_attr_dsense.attr,
6617 &driver_attr_fake_rw.attr,
6618 &driver_attr_host_max_queue.attr,
6619 &driver_attr_no_lun_0.attr,
6620 &driver_attr_num_tgts.attr,
6621 &driver_attr_dev_size_mb.attr,
6622 &driver_attr_num_parts.attr,
6623 &driver_attr_every_nth.attr,
6624 &driver_attr_lun_format.attr,
6625 &driver_attr_max_luns.attr,
6626 &driver_attr_max_queue.attr,
6627 &driver_attr_no_uld.attr,
6628 &driver_attr_scsi_level.attr,
6629 &driver_attr_virtual_gb.attr,
6630 &driver_attr_add_host.attr,
6631 &driver_attr_per_host_store.attr,
6632 &driver_attr_vpd_use_hostno.attr,
6633 &driver_attr_sector_size.attr,
6634 &driver_attr_statistics.attr,
6635 &driver_attr_submit_queues.attr,
6636 &driver_attr_dix.attr,
6637 &driver_attr_dif.attr,
6638 &driver_attr_guard.attr,
6639 &driver_attr_ato.attr,
6640 &driver_attr_map.attr,
6641 &driver_attr_random.attr,
6642 &driver_attr_removable.attr,
6643 &driver_attr_host_lock.attr,
6644 &driver_attr_ndelay.attr,
6645 &driver_attr_strict.attr,
6646 &driver_attr_uuid_ctl.attr,
6647 &driver_attr_cdb_len.attr,
6648 &driver_attr_tur_ms_to_ready.attr,
6649 &driver_attr_zbc.attr,
6652 ATTRIBUTE_GROUPS(sdebug_drv);
6654 static struct device *pseudo_primary;
6656 static int __init scsi_debug_init(void)
6658 bool want_store = (sdebug_fake_rw == 0);
6660 int k, ret, hosts_to_add;
6663 ramdisk_lck_a[0] = &atomic_rw;
6664 ramdisk_lck_a[1] = &atomic_rw2;
6665 atomic_set(&retired_max_queue, 0);
6667 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6668 pr_warn("ndelay must be less than 1 second, ignored\n");
6670 } else if (sdebug_ndelay > 0)
6671 sdebug_jdelay = JDELAY_OVERRIDDEN;
6673 switch (sdebug_sector_size) {
6680 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6684 switch (sdebug_dif) {
6685 case T10_PI_TYPE0_PROTECTION:
6687 case T10_PI_TYPE1_PROTECTION:
6688 case T10_PI_TYPE2_PROTECTION:
6689 case T10_PI_TYPE3_PROTECTION:
6690 have_dif_prot = true;
6694 pr_err("dif must be 0, 1, 2 or 3\n");
6698 if (sdebug_num_tgts < 0) {
6699 pr_err("num_tgts must be >= 0\n");
6703 if (sdebug_guard > 1) {
6704 pr_err("guard must be 0 or 1\n");
6708 if (sdebug_ato > 1) {
6709 pr_err("ato must be 0 or 1\n");
6713 if (sdebug_physblk_exp > 15) {
6714 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6718 sdebug_lun_am = sdebug_lun_am_i;
6719 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6720 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6721 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6724 if (sdebug_max_luns > 256) {
6725 if (sdebug_max_luns > 16384) {
6726 pr_warn("max_luns can be no more than 16384, use default\n");
6727 sdebug_max_luns = DEF_MAX_LUNS;
6729 sdebug_lun_am = SAM_LUN_AM_FLAT;
6732 if (sdebug_lowest_aligned > 0x3fff) {
6733 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6737 if (submit_queues < 1) {
6738 pr_err("submit_queues must be 1 or more\n");
6742 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6743 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6747 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6748 (sdebug_host_max_queue < 0)) {
6749 pr_err("host_max_queue must be in range [0 %d]\n",
6754 if (sdebug_host_max_queue &&
6755 (sdebug_max_queue != sdebug_host_max_queue)) {
6756 sdebug_max_queue = sdebug_host_max_queue;
6757 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6761 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6763 if (sdebug_q_arr == NULL)
6765 for (k = 0; k < submit_queues; ++k)
6766 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6769 * check for host managed zoned block device specified with
6770 * ptype=0x14 or zbc=XXX.
6772 if (sdebug_ptype == TYPE_ZBC) {
6773 sdeb_zbc_model = BLK_ZONED_HM;
6774 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6775 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6781 switch (sdeb_zbc_model) {
6782 case BLK_ZONED_NONE:
6784 sdebug_ptype = TYPE_DISK;
6787 sdebug_ptype = TYPE_ZBC;
6790 pr_err("Invalid ZBC model\n");
6795 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6796 sdeb_zbc_in_use = true;
6797 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6798 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6801 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6802 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6803 if (sdebug_dev_size_mb < 1)
6804 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6805 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6806 sdebug_store_sectors = sz / sdebug_sector_size;
6807 sdebug_capacity = get_sdebug_capacity();
6809 /* play around with geometry, don't waste too much on track 0 */
6811 sdebug_sectors_per = 32;
6812 if (sdebug_dev_size_mb >= 256)
6814 else if (sdebug_dev_size_mb >= 16)
6816 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6817 (sdebug_sectors_per * sdebug_heads);
6818 if (sdebug_cylinders_per >= 1024) {
6819 /* other LLDs do this; implies >= 1GB ram disk ... */
6821 sdebug_sectors_per = 63;
6822 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6823 (sdebug_sectors_per * sdebug_heads);
6825 if (scsi_debug_lbp()) {
6826 sdebug_unmap_max_blocks =
6827 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6829 sdebug_unmap_max_desc =
6830 clamp(sdebug_unmap_max_desc, 0U, 256U);
6832 sdebug_unmap_granularity =
6833 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6835 if (sdebug_unmap_alignment &&
6836 sdebug_unmap_granularity <=
6837 sdebug_unmap_alignment) {
6838 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6843 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6845 idx = sdebug_add_store();
6852 pseudo_primary = root_device_register("pseudo_0");
6853 if (IS_ERR(pseudo_primary)) {
6854 pr_warn("root_device_register() error\n");
6855 ret = PTR_ERR(pseudo_primary);
6858 ret = bus_register(&pseudo_lld_bus);
6860 pr_warn("bus_register error: %d\n", ret);
6863 ret = driver_register(&sdebug_driverfs_driver);
6865 pr_warn("driver_register error: %d\n", ret);
6869 hosts_to_add = sdebug_add_host;
6870 sdebug_add_host = 0;
6872 for (k = 0; k < hosts_to_add; k++) {
6873 if (want_store && k == 0) {
6874 ret = sdebug_add_host_helper(idx);
6876 pr_err("add_host_helper k=%d, error=%d\n",
6881 ret = sdebug_do_add_host(want_store &&
6882 sdebug_per_host_store);
6884 pr_err("add_host k=%d error=%d\n", k, -ret);
6890 pr_info("built %d host(s)\n", sdebug_num_hosts);
6895 bus_unregister(&pseudo_lld_bus);
6897 root_device_unregister(pseudo_primary);
6899 sdebug_erase_store(idx, NULL);
6901 kfree(sdebug_q_arr);
6905 static void __exit scsi_debug_exit(void)
6907 int k = sdebug_num_hosts;
6911 sdebug_do_remove_host(true);
6913 driver_unregister(&sdebug_driverfs_driver);
6914 bus_unregister(&pseudo_lld_bus);
6915 root_device_unregister(pseudo_primary);
6917 sdebug_erase_all_stores(false);
6918 xa_destroy(per_store_ap);
6919 kfree(sdebug_q_arr);
6922 device_initcall(scsi_debug_init);
6923 module_exit(scsi_debug_exit);
6925 static void sdebug_release_adapter(struct device *dev)
6927 struct sdebug_host_info *sdbg_host;
6929 sdbg_host = to_sdebug_host(dev);
6933 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6934 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6939 if (xa_empty(per_store_ap))
6941 sip = xa_load(per_store_ap, idx);
6945 vfree(sip->map_storep);
6946 vfree(sip->dif_storep);
6948 xa_erase(per_store_ap, idx);
6952 /* Assume apart_from_first==false only in shutdown case. */
6953 static void sdebug_erase_all_stores(bool apart_from_first)
6956 struct sdeb_store_info *sip = NULL;
6958 xa_for_each(per_store_ap, idx, sip) {
6959 if (apart_from_first)
6960 apart_from_first = false;
6962 sdebug_erase_store(idx, sip);
6964 if (apart_from_first)
6965 sdeb_most_recent_idx = sdeb_first_idx;
6969 * Returns store xarray new element index (idx) if >=0 else negated errno.
6970 * Limit the number of stores to 65536.
6972 static int sdebug_add_store(void)
6976 unsigned long iflags;
6977 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6978 struct sdeb_store_info *sip = NULL;
6979 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6981 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6985 xa_lock_irqsave(per_store_ap, iflags);
6986 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6987 if (unlikely(res < 0)) {
6988 xa_unlock_irqrestore(per_store_ap, iflags);
6990 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6993 sdeb_most_recent_idx = n_idx;
6994 if (sdeb_first_idx < 0)
6995 sdeb_first_idx = n_idx;
6996 xa_unlock_irqrestore(per_store_ap, iflags);
6999 sip->storep = vzalloc(sz);
7001 pr_err("user data oom\n");
7004 if (sdebug_num_parts > 0)
7005 sdebug_build_parts(sip->storep, sz);
7007 /* DIF/DIX: what T10 calls Protection Information (PI) */
7011 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7012 sip->dif_storep = vmalloc(dif_size);
7014 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7017 if (!sip->dif_storep) {
7018 pr_err("DIX oom\n");
7021 memset(sip->dif_storep, 0xff, dif_size);
7023 /* Logical Block Provisioning */
7024 if (scsi_debug_lbp()) {
7025 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7026 sip->map_storep = vmalloc(array_size(sizeof(long),
7027 BITS_TO_LONGS(map_size)));
7029 pr_info("%lu provisioning blocks\n", map_size);
7031 if (!sip->map_storep) {
7032 pr_err("LBP map oom\n");
7036 bitmap_zero(sip->map_storep, map_size);
7038 /* Map first 1KB for partition table */
7039 if (sdebug_num_parts)
7040 map_region(sip, 0, 2);
7043 rwlock_init(&sip->macc_lck);
7046 sdebug_erase_store((int)n_idx, sip);
7047 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7051 static int sdebug_add_host_helper(int per_host_idx)
7053 int k, devs_per_host, idx;
7054 int error = -ENOMEM;
7055 struct sdebug_host_info *sdbg_host;
7056 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7058 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7061 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7062 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7063 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7064 sdbg_host->si_idx = idx;
7066 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7068 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7069 for (k = 0; k < devs_per_host; k++) {
7070 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7075 spin_lock(&sdebug_host_list_lock);
7076 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7077 spin_unlock(&sdebug_host_list_lock);
7079 sdbg_host->dev.bus = &pseudo_lld_bus;
7080 sdbg_host->dev.parent = pseudo_primary;
7081 sdbg_host->dev.release = &sdebug_release_adapter;
7082 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7084 error = device_register(&sdbg_host->dev);
7092 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7094 list_del(&sdbg_devinfo->dev_list);
7095 kfree(sdbg_devinfo->zstate);
7096 kfree(sdbg_devinfo);
7099 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7103 static int sdebug_do_add_host(bool mk_new_store)
7105 int ph_idx = sdeb_most_recent_idx;
7108 ph_idx = sdebug_add_store();
7112 return sdebug_add_host_helper(ph_idx);
7115 static void sdebug_do_remove_host(bool the_end)
7118 struct sdebug_host_info *sdbg_host = NULL;
7119 struct sdebug_host_info *sdbg_host2;
7121 spin_lock(&sdebug_host_list_lock);
7122 if (!list_empty(&sdebug_host_list)) {
7123 sdbg_host = list_entry(sdebug_host_list.prev,
7124 struct sdebug_host_info, host_list);
7125 idx = sdbg_host->si_idx;
7127 if (!the_end && idx >= 0) {
7130 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7131 if (sdbg_host2 == sdbg_host)
7133 if (idx == sdbg_host2->si_idx) {
7139 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7140 if (idx == sdeb_most_recent_idx)
7141 --sdeb_most_recent_idx;
7145 list_del(&sdbg_host->host_list);
7146 spin_unlock(&sdebug_host_list_lock);
7151 device_unregister(&sdbg_host->dev);
7155 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7158 struct sdebug_dev_info *devip;
7160 block_unblock_all_queues(true);
7161 devip = (struct sdebug_dev_info *)sdev->hostdata;
7162 if (NULL == devip) {
7163 block_unblock_all_queues(false);
7166 num_in_q = atomic_read(&devip->num_in_q);
7168 if (qdepth > SDEBUG_CANQUEUE) {
7169 qdepth = SDEBUG_CANQUEUE;
7170 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7171 qdepth, SDEBUG_CANQUEUE);
7175 if (qdepth != sdev->queue_depth)
7176 scsi_change_queue_depth(sdev, qdepth);
7178 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7179 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7180 __func__, qdepth, num_in_q);
7182 block_unblock_all_queues(false);
7183 return sdev->queue_depth;
7186 static bool fake_timeout(struct scsi_cmnd *scp)
7188 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7189 if (sdebug_every_nth < -1)
7190 sdebug_every_nth = -1;
7191 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7192 return true; /* ignore command causing timeout */
7193 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7194 scsi_medium_access_command(scp))
7195 return true; /* time out reads and writes */
7200 /* Response to TUR or media access command when device stopped */
7201 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7205 ktime_t now_ts = ktime_get_boottime();
7206 struct scsi_device *sdp = scp->device;
7208 stopped_state = atomic_read(&devip->stopped);
7209 if (stopped_state == 2) {
7210 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7211 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7212 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7213 /* tur_ms_to_ready timer extinguished */
7214 atomic_set(&devip->stopped, 0);
7218 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7220 sdev_printk(KERN_INFO, sdp,
7221 "%s: Not ready: in process of becoming ready\n", my_name);
7222 if (scp->cmnd[0] == TEST_UNIT_READY) {
7223 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7225 if (diff_ns <= tur_nanosecs_to_ready)
7226 diff_ns = tur_nanosecs_to_ready - diff_ns;
7228 diff_ns = tur_nanosecs_to_ready;
7229 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7230 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7231 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7233 return check_condition_result;
7236 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7238 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7240 return check_condition_result;
7243 static int sdebug_map_queues(struct Scsi_Host *shost)
7247 if (shost->nr_hw_queues == 1)
7250 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7251 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7255 if (i == HCTX_TYPE_DEFAULT)
7256 map->nr_queues = submit_queues - poll_queues;
7257 else if (i == HCTX_TYPE_POLL)
7258 map->nr_queues = poll_queues;
7260 if (!map->nr_queues) {
7261 BUG_ON(i == HCTX_TYPE_DEFAULT);
7265 map->queue_offset = qoff;
7266 blk_mq_map_queues(map);
7268 qoff += map->nr_queues;
7275 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7278 bool retiring = false;
7279 int num_entries = 0;
7280 unsigned int qc_idx = 0;
7281 unsigned long iflags;
7282 ktime_t kt_from_boot = ktime_get_boottime();
7283 struct sdebug_queue *sqp;
7284 struct sdebug_queued_cmd *sqcp;
7285 struct scsi_cmnd *scp;
7286 struct sdebug_dev_info *devip;
7287 struct sdebug_defer *sd_dp;
7289 sqp = sdebug_q_arr + queue_num;
7290 spin_lock_irqsave(&sqp->qc_lock, iflags);
7292 for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7294 qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7297 qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7299 if (unlikely(qc_idx >= sdebug_max_queue))
7302 sqcp = &sqp->qc_arr[qc_idx];
7303 sd_dp = sqcp->sd_dp;
7304 if (unlikely(!sd_dp))
7307 if (unlikely(scp == NULL)) {
7308 pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7309 queue_num, qc_idx, __func__);
7312 if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7313 if (kt_from_boot < sd_dp->cmpl_ts)
7316 } else /* ignoring non REQ_HIPRI requests */
7318 devip = (struct sdebug_dev_info *)scp->device->hostdata;
7320 atomic_dec(&devip->num_in_q);
7322 pr_err("devip=NULL from %s\n", __func__);
7323 if (unlikely(atomic_read(&retired_max_queue) > 0))
7326 sqcp->a_cmnd = NULL;
7327 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7328 pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7329 sqp, queue_num, qc_idx, __func__);
7332 if (unlikely(retiring)) { /* user has reduced max_queue */
7335 retval = atomic_read(&retired_max_queue);
7336 if (qc_idx >= retval) {
7337 pr_err("index %d too large\n", retval);
7340 k = find_last_bit(sqp->in_use_bm, retval);
7341 if ((k < sdebug_max_queue) || (k == retval))
7342 atomic_set(&retired_max_queue, 0);
7344 atomic_set(&retired_max_queue, k + 1);
7346 sd_dp->defer_t = SDEB_DEFER_NONE;
7347 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7348 scp->scsi_done(scp); /* callback to mid level */
7349 spin_lock_irqsave(&sqp->qc_lock, iflags);
7352 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7353 if (num_entries > 0)
7354 atomic_add(num_entries, &sdeb_mq_poll_count);
7358 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7359 struct scsi_cmnd *scp)
7362 struct scsi_device *sdp = scp->device;
7363 const struct opcode_info_t *oip;
7364 const struct opcode_info_t *r_oip;
7365 struct sdebug_dev_info *devip;
7366 u8 *cmd = scp->cmnd;
7367 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7368 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7371 u64 lun_index = sdp->lun & 0x3FFF;
7378 scsi_set_resid(scp, 0);
7379 if (sdebug_statistics) {
7380 atomic_inc(&sdebug_cmnd_count);
7381 inject_now = inject_on_this_cmd();
7385 if (unlikely(sdebug_verbose &&
7386 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7391 sb = (int)sizeof(b);
7393 strcpy(b, "too long, over 32 bytes");
7395 for (k = 0, n = 0; k < len && n < sb; ++k)
7396 n += scnprintf(b + n, sb - n, "%02x ",
7399 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7400 blk_mq_unique_tag(scp->request), b);
7402 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7403 return SCSI_MLQUEUE_HOST_BUSY;
7404 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7405 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7408 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7409 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7410 devip = (struct sdebug_dev_info *)sdp->hostdata;
7411 if (unlikely(!devip)) {
7412 devip = find_build_dev_info(sdp);
7416 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7417 atomic_set(&sdeb_inject_pending, 1);
7419 na = oip->num_attached;
7421 if (na) { /* multiple commands with this opcode */
7423 if (FF_SA & r_oip->flags) {
7424 if (F_SA_LOW & oip->flags)
7427 sa = get_unaligned_be16(cmd + 8);
7428 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7429 if (opcode == oip->opcode && sa == oip->sa)
7432 } else { /* since no service action only check opcode */
7433 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7434 if (opcode == oip->opcode)
7439 if (F_SA_LOW & r_oip->flags)
7440 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7441 else if (F_SA_HIGH & r_oip->flags)
7442 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7444 mk_sense_invalid_opcode(scp);
7447 } /* else (when na==0) we assume the oip is a match */
7449 if (unlikely(F_INV_OP & flags)) {
7450 mk_sense_invalid_opcode(scp);
7453 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7455 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7456 my_name, opcode, " supported for wlun");
7457 mk_sense_invalid_opcode(scp);
7460 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7464 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7465 rem = ~oip->len_mask[k] & cmd[k];
7467 for (j = 7; j >= 0; --j, rem <<= 1) {
7471 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7476 if (unlikely(!(F_SKIP_UA & flags) &&
7477 find_first_bit(devip->uas_bm,
7478 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7479 errsts = make_ua(scp, devip);
7483 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7484 atomic_read(&devip->stopped))) {
7485 errsts = resp_not_ready(scp, devip);
7489 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7491 if (unlikely(sdebug_every_nth)) {
7492 if (fake_timeout(scp))
7493 return 0; /* ignore command: make trouble */
7495 if (likely(oip->pfp))
7496 pfp = oip->pfp; /* calls a resp_* function */
7498 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7501 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7502 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7503 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7504 sdebug_ndelay > 10000)) {
7506 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7507 * for Start Stop Unit (SSU) want at least 1 second delay and
7508 * if sdebug_jdelay>1 want a long delay of that many seconds.
7509 * For Synchronize Cache want 1/20 of SSU's delay.
7511 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7512 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7514 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7515 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7517 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7520 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7522 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7525 static struct scsi_host_template sdebug_driver_template = {
7526 .show_info = scsi_debug_show_info,
7527 .write_info = scsi_debug_write_info,
7528 .proc_name = sdebug_proc_name,
7529 .name = "SCSI DEBUG",
7530 .info = scsi_debug_info,
7531 .slave_alloc = scsi_debug_slave_alloc,
7532 .slave_configure = scsi_debug_slave_configure,
7533 .slave_destroy = scsi_debug_slave_destroy,
7534 .ioctl = scsi_debug_ioctl,
7535 .queuecommand = scsi_debug_queuecommand,
7536 .change_queue_depth = sdebug_change_qdepth,
7537 .map_queues = sdebug_map_queues,
7538 .mq_poll = sdebug_blk_mq_poll,
7539 .eh_abort_handler = scsi_debug_abort,
7540 .eh_device_reset_handler = scsi_debug_device_reset,
7541 .eh_target_reset_handler = scsi_debug_target_reset,
7542 .eh_bus_reset_handler = scsi_debug_bus_reset,
7543 .eh_host_reset_handler = scsi_debug_host_reset,
7544 .can_queue = SDEBUG_CANQUEUE,
7546 .sg_tablesize = SG_MAX_SEGMENTS,
7547 .cmd_per_lun = DEF_CMD_PER_LUN,
7549 .max_segment_size = -1U,
7550 .module = THIS_MODULE,
7551 .track_queue_depth = 1,
7554 static int sdebug_driver_probe(struct device *dev)
7557 struct sdebug_host_info *sdbg_host;
7558 struct Scsi_Host *hpnt;
7561 sdbg_host = to_sdebug_host(dev);
7563 sdebug_driver_template.can_queue = sdebug_max_queue;
7564 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7565 if (!sdebug_clustering)
7566 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7568 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7570 pr_err("scsi_host_alloc failed\n");
7574 if (submit_queues > nr_cpu_ids) {
7575 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7576 my_name, submit_queues, nr_cpu_ids);
7577 submit_queues = nr_cpu_ids;
7580 * Decide whether to tell scsi subsystem that we want mq. The
7581 * following should give the same answer for each host.
7583 hpnt->nr_hw_queues = submit_queues;
7584 if (sdebug_host_max_queue)
7585 hpnt->host_tagset = 1;
7587 /* poll queues are possible for nr_hw_queues > 1 */
7588 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7589 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7590 my_name, poll_queues, hpnt->nr_hw_queues);
7595 * Poll queues don't need interrupts, but we need at least one I/O queue
7596 * left over for non-polled I/O.
7597 * If condition not met, trim poll_queues to 1 (just for simplicity).
7599 if (poll_queues >= submit_queues) {
7600 if (submit_queues < 3)
7601 pr_warn("%s: trim poll_queues to 1\n", my_name);
7603 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7604 my_name, submit_queues - 1);
7610 sdbg_host->shost = hpnt;
7611 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7612 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7613 hpnt->max_id = sdebug_num_tgts + 1;
7615 hpnt->max_id = sdebug_num_tgts;
7616 /* = sdebug_max_luns; */
7617 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7621 switch (sdebug_dif) {
7623 case T10_PI_TYPE1_PROTECTION:
7624 hprot = SHOST_DIF_TYPE1_PROTECTION;
7626 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7629 case T10_PI_TYPE2_PROTECTION:
7630 hprot = SHOST_DIF_TYPE2_PROTECTION;
7632 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7635 case T10_PI_TYPE3_PROTECTION:
7636 hprot = SHOST_DIF_TYPE3_PROTECTION;
7638 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7643 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7647 scsi_host_set_prot(hpnt, hprot);
7649 if (have_dif_prot || sdebug_dix)
7650 pr_info("host protection%s%s%s%s%s%s%s\n",
7651 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7652 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7653 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7654 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7655 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7656 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7657 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7659 if (sdebug_guard == 1)
7660 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7662 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7664 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7665 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7666 if (sdebug_every_nth) /* need stats counters for every_nth */
7667 sdebug_statistics = true;
7668 error = scsi_add_host(hpnt, &sdbg_host->dev);
7670 pr_err("scsi_add_host failed\n");
7672 scsi_host_put(hpnt);
7674 scsi_scan_host(hpnt);
7680 static int sdebug_driver_remove(struct device *dev)
7682 struct sdebug_host_info *sdbg_host;
7683 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7685 sdbg_host = to_sdebug_host(dev);
7688 pr_err("Unable to locate host info\n");
7692 scsi_remove_host(sdbg_host->shost);
7694 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7696 list_del(&sdbg_devinfo->dev_list);
7697 kfree(sdbg_devinfo->zstate);
7698 kfree(sdbg_devinfo);
7701 scsi_host_put(sdbg_host->shost);
7705 static int pseudo_lld_bus_match(struct device *dev,
7706 struct device_driver *dev_driver)
7711 static struct bus_type pseudo_lld_bus = {
7713 .match = pseudo_lld_bus_match,
7714 .probe = sdebug_driver_probe,
7715 .remove = sdebug_driver_remove,
7716 .drv_groups = sdebug_drv_groups,