1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/mutex.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
60 #include "scsi_logging.h"
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20210520";
66 #define MY_NAME "scsi_debug"
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define POWER_ON_OCCURRED_ASCQ 0x1
87 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
88 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
89 #define CAPACITY_CHANGED_ASCQ 0x9
90 #define SAVING_PARAMS_UNSUP 0x39
91 #define TRANSPORT_PROBLEM 0x4b
92 #define THRESHOLD_EXCEEDED 0x5d
93 #define LOW_POWER_COND_ON 0x5e
94 #define MISCOMPARE_VERIFY_ASC 0x1d
95 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
96 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
97 #define WRITE_ERROR_ASC 0xc
98 #define UNALIGNED_WRITE_ASCQ 0x4
99 #define WRITE_BOUNDARY_ASCQ 0x5
100 #define READ_INVDATA_ASCQ 0x6
101 #define READ_BOUNDARY_ASCQ 0x7
102 #define INSUFF_ZONE_ASCQ 0xe
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST 1
109 #define DEF_NUM_TGTS 1
110 #define DEF_MAX_LUNS 1
111 /* With these defaults, this driver will make 1 host with 1 target
112 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT 0
118 #define DEF_DEV_SIZE_MB 8
119 #define DEF_ZBC_DEV_SIZE_MB 128
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE 0
124 #define DEF_EVERY_NTH 0
125 #define DEF_FAKE_RW 0
127 #define DEF_HOST_LOCK 0
130 #define DEF_LBPWS10 0
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0 0
135 #define DEF_NUM_PARTS 0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB 0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB 128
161 #define DEF_ZBC_MAX_OPEN_ZONES 8
162 #define DEF_ZBC_NR_CONV_ZONES 1
164 #define SDEBUG_LUN_0_VAL 0
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE 1
168 #define SDEBUG_OPT_MEDIUM_ERR 2
169 #define SDEBUG_OPT_TIMEOUT 4
170 #define SDEBUG_OPT_RECOVERED_ERR 8
171 #define SDEBUG_OPT_TRANSPORT_ERR 16
172 #define SDEBUG_OPT_DIF_ERR 32
173 #define SDEBUG_OPT_DIX_ERR 64
174 #define SDEBUG_OPT_MAC_TIMEOUT 128
175 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
176 #define SDEBUG_OPT_Q_NOISE 0x200
177 #define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
178 #define SDEBUG_OPT_RARE_TSF 0x800
179 #define SDEBUG_OPT_N_WCE 0x1000
180 #define SDEBUG_OPT_RESET_NOISE 0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
182 #define SDEBUG_OPT_HOST_BUSY 0x8000
183 #define SDEBUG_OPT_CMD_ABORT 0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 SDEBUG_OPT_TRANSPORT_ERR | \
188 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 SDEBUG_OPT_SHORT_TRANSFER | \
190 SDEBUG_OPT_HOST_BUSY | \
191 SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
195 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
196 * priority order. In the subset implemented here lower numbers have higher
197 * priority. The UA numbers should be a sequence starting from 0 with
198 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
209 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
210 * sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215 * (for response) per submit queue at one time. Can be reduced by max_queue
216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219 * but cannot exceed SDEBUG_CANQUEUE .
221 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN 1 /* Data-in command (e.g. READ) */
227 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
230 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
233 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
236 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
238 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
247 #define SDEBUG_MAX_PARTS 4
249 #define SDEBUG_MAX_CMD_LEN 32
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
253 /* Zone types (zbcr05 table 25) */
255 ZBC_ZONE_TYPE_CNV = 0x1,
256 ZBC_ZONE_TYPE_SWR = 0x2,
257 ZBC_ZONE_TYPE_SWP = 0x3,
260 /* enumeration names taken from table 26, zbcr05 */
262 ZBC_NOT_WRITE_POINTER = 0x0,
264 ZC2_IMPLICIT_OPEN = 0x2,
265 ZC3_EXPLICIT_OPEN = 0x3,
272 struct sdeb_zone_state { /* ZBC: per zone state */
273 enum sdebug_z_type z_type;
274 enum sdebug_z_cond z_cond;
275 bool z_non_seq_resource;
281 struct sdebug_dev_info {
282 struct list_head dev_list;
283 unsigned int channel;
287 struct sdebug_host_info *sdbg_host;
288 unsigned long uas_bm[1];
290 atomic_t stopped; /* 1: by SSU, 2: device start */
293 /* For ZBC devices */
294 enum blk_zoned_model zmodel;
296 unsigned int zsize_shift;
297 unsigned int nr_zones;
298 unsigned int nr_conv_zones;
299 unsigned int nr_imp_open;
300 unsigned int nr_exp_open;
301 unsigned int nr_closed;
302 unsigned int max_open;
303 ktime_t create_ts; /* time since bootup that this device was created */
304 struct sdeb_zone_state *zstate;
307 struct sdebug_host_info {
308 struct list_head host_list;
309 int si_idx; /* sdeb_store_info (per host) xarray index */
310 struct Scsi_Host *shost;
312 struct list_head dev_info_list;
315 /* There is an xarray of pointers to this struct's objects, one per host */
316 struct sdeb_store_info {
317 rwlock_t macc_lck; /* for atomic media access on this store */
318 u8 *storep; /* user data storage (ram) */
319 struct t10_pi_tuple *dif_storep; /* protection info */
320 void *map_storep; /* provisioning map */
323 #define to_sdebug_host(d) \
324 container_of(d, struct sdebug_host_info, dev)
326 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
327 SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
329 struct sdebug_defer {
331 struct execute_work ew;
332 ktime_t cmpl_ts;/* time since boot to complete this cmd */
333 int sqa_idx; /* index of sdebug_queue array */
334 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
335 int hc_idx; /* hostwide tag index */
340 bool aborted; /* true when blk_abort_request() already called */
341 enum sdeb_defer_type defer_t;
344 struct sdebug_queued_cmd {
345 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
346 * instance indicates this slot is in use.
348 struct sdebug_defer *sd_dp;
349 struct scsi_cmnd *a_cmnd;
352 struct sdebug_queue {
353 struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
354 unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
356 atomic_t blocked; /* to temporarily stop more being queued */
359 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
360 static atomic_t sdebug_completions; /* count of deferred completions */
361 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
362 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
363 static atomic_t sdeb_inject_pending;
364 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
366 struct opcode_info_t {
367 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
368 /* for terminating element */
369 u8 opcode; /* if num_attached > 0, preferred */
370 u16 sa; /* service action */
371 u32 flags; /* OR-ed set of SDEB_F_* */
372 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
373 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
374 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
375 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
378 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
379 enum sdeb_opcode_index {
380 SDEB_I_INVALID_OPCODE = 0,
382 SDEB_I_REPORT_LUNS = 2,
383 SDEB_I_REQUEST_SENSE = 3,
384 SDEB_I_TEST_UNIT_READY = 4,
385 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
386 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
387 SDEB_I_LOG_SENSE = 7,
388 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
389 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
390 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
391 SDEB_I_START_STOP = 11,
392 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
393 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
394 SDEB_I_MAINT_IN = 14,
395 SDEB_I_MAINT_OUT = 15,
396 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
397 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
398 SDEB_I_RESERVE = 18, /* 6, 10 */
399 SDEB_I_RELEASE = 19, /* 6, 10 */
400 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
401 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
402 SDEB_I_ATA_PT = 22, /* 12, 16 */
403 SDEB_I_SEND_DIAG = 23,
405 SDEB_I_WRITE_BUFFER = 25,
406 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
407 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
408 SDEB_I_COMP_WRITE = 28,
409 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
410 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
411 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
412 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
416 static const unsigned char opcode_ind_arr[256] = {
417 /* 0x0; 0x0->0x1f: 6 byte cdbs */
418 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
420 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
421 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
423 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
424 SDEB_I_ALLOW_REMOVAL, 0,
425 /* 0x20; 0x20->0x3f: 10 byte cdbs */
426 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
427 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
428 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
429 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
430 /* 0x40; 0x40->0x5f: 10 byte cdbs */
431 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
432 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
433 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
435 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
436 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
437 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
438 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
439 0, SDEB_I_VARIABLE_LEN,
440 /* 0x80; 0x80->0x9f: 16 byte cdbs */
441 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
442 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
443 0, 0, 0, SDEB_I_VERIFY,
444 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
445 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
446 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
447 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
448 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
449 SDEB_I_MAINT_OUT, 0, 0, 0,
450 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
451 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
452 0, 0, 0, 0, 0, 0, 0, 0,
453 0, 0, 0, 0, 0, 0, 0, 0,
454 /* 0xc0; 0xc0->0xff: vendor specific */
455 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
457 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
458 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
462 * The following "response" functions return the SCSI mid-level's 4 byte
463 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
464 * command completion, they can mask their return value with
465 * SDEG_RES_IMMED_MASK .
467 #define SDEG_RES_IMMED_MASK 0x40000000
469 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int sdebug_do_add_host(bool mk_new_store);
500 static int sdebug_add_host_helper(int per_host_idx);
501 static void sdebug_do_remove_host(bool the_end);
502 static int sdebug_add_store(void);
503 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
504 static void sdebug_erase_all_stores(bool apart_from_first);
507 * The following are overflow arrays for cdbs that "hit" the same index in
508 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
509 * should be placed in opcode_info_arr[], the others should be placed here.
511 static const struct opcode_info_t msense_iarr[] = {
512 {0, 0x1a, 0, F_D_IN, NULL, NULL,
513 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
516 static const struct opcode_info_t mselect_iarr[] = {
517 {0, 0x15, 0, F_D_OUT, NULL, NULL,
518 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
521 static const struct opcode_info_t read_iarr[] = {
522 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
523 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
525 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
526 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
527 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
528 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
532 static const struct opcode_info_t write_iarr[] = {
533 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
534 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
536 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
537 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
539 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
540 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
541 0xbf, 0xc7, 0, 0, 0, 0} },
544 static const struct opcode_info_t verify_iarr[] = {
545 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
546 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
550 static const struct opcode_info_t sa_in_16_iarr[] = {
551 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
552 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
553 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
556 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
557 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
558 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
559 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
560 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
561 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
562 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
565 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
566 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
567 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
568 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
569 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
570 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
571 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
574 static const struct opcode_info_t write_same_iarr[] = {
575 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
576 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
577 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
580 static const struct opcode_info_t reserve_iarr[] = {
581 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
582 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
585 static const struct opcode_info_t release_iarr[] = {
586 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
587 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
590 static const struct opcode_info_t sync_cache_iarr[] = {
591 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
592 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
593 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
596 static const struct opcode_info_t pre_fetch_iarr[] = {
597 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
598 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
599 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
602 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
603 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
604 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
605 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
606 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
607 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
608 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
609 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
610 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
611 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
614 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
615 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
616 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
617 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
621 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
622 * plus the terminating elements for logic that scans this table such as
623 * REPORT SUPPORTED OPERATION CODES. */
624 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
626 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
627 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
628 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
629 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
630 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
631 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
632 0, 0} }, /* REPORT LUNS */
633 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
634 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
635 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
636 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
638 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
639 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
640 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
641 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
642 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
643 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
644 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
645 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
647 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
648 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
650 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
651 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
652 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
654 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
655 resp_write_dt0, write_iarr, /* WRITE(16) */
656 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
657 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
658 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
659 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
660 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
661 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
662 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
663 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
664 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
665 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
666 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
667 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
668 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
669 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
670 0xff, 0, 0xc7, 0, 0, 0, 0} },
672 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
673 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
674 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
675 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
676 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
677 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
678 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
679 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
680 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
682 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
683 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
684 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
686 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
687 NULL, release_iarr, /* RELEASE(10) <no response function> */
688 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
691 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
692 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
693 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
694 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
695 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
696 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
698 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
700 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
702 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
703 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
704 0, 0, 0, 0} }, /* WRITE_BUFFER */
705 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
706 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
707 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
709 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
710 resp_sync_cache, sync_cache_iarr,
711 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
712 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
713 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
714 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
715 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
716 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
717 resp_pre_fetch, pre_fetch_iarr,
718 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
719 0, 0, 0, 0} }, /* PRE-FETCH (10) */
722 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
723 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
724 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
725 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
726 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
727 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
728 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
731 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
732 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
735 static atomic_t sdebug_num_hosts;
736 static DEFINE_MUTEX(add_host_mutex);
738 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
739 static int sdebug_ato = DEF_ATO;
740 static int sdebug_cdb_len = DEF_CDB_LEN;
741 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
742 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
743 static int sdebug_dif = DEF_DIF;
744 static int sdebug_dix = DEF_DIX;
745 static int sdebug_dsense = DEF_D_SENSE;
746 static int sdebug_every_nth = DEF_EVERY_NTH;
747 static int sdebug_fake_rw = DEF_FAKE_RW;
748 static unsigned int sdebug_guard = DEF_GUARD;
749 static int sdebug_host_max_queue; /* per host */
750 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
751 static int sdebug_max_luns = DEF_MAX_LUNS;
752 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
753 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
754 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
755 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
756 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
757 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
758 static int sdebug_no_uld;
759 static int sdebug_num_parts = DEF_NUM_PARTS;
760 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
761 static int sdebug_opt_blks = DEF_OPT_BLKS;
762 static int sdebug_opts = DEF_OPTS;
763 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
764 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
765 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
766 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
767 static int sdebug_sector_size = DEF_SECTOR_SIZE;
768 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
769 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
770 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
771 static unsigned int sdebug_lbpu = DEF_LBPU;
772 static unsigned int sdebug_lbpws = DEF_LBPWS;
773 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
774 static unsigned int sdebug_lbprz = DEF_LBPRZ;
775 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
776 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
777 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
778 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
779 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
780 static int sdebug_uuid_ctl = DEF_UUID_CTL;
781 static bool sdebug_random = DEF_RANDOM;
782 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
783 static bool sdebug_removable = DEF_REMOVABLE;
784 static bool sdebug_deflect_incoming;
785 static bool sdebug_clustering;
786 static bool sdebug_host_lock = DEF_HOST_LOCK;
787 static bool sdebug_strict = DEF_STRICT;
788 static bool sdebug_any_injecting_opt;
789 static bool sdebug_no_rwlock;
790 static bool sdebug_verbose;
791 static bool have_dif_prot;
792 static bool write_since_sync;
793 static bool sdebug_statistics = DEF_STATISTICS;
794 static bool sdebug_wp;
795 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
796 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
797 static char *sdeb_zbc_model_s;
799 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
800 SAM_LUN_AM_FLAT = 0x1,
801 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
802 SAM_LUN_AM_EXTENDED = 0x3};
803 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
804 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
806 static unsigned int sdebug_store_sectors;
807 static sector_t sdebug_capacity; /* in sectors */
809 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
810 may still need them */
811 static int sdebug_heads; /* heads per disk */
812 static int sdebug_cylinders_per; /* cylinders per surface */
813 static int sdebug_sectors_per; /* sectors per cylinder */
815 static LIST_HEAD(sdebug_host_list);
816 static DEFINE_SPINLOCK(sdebug_host_list_lock);
818 static struct xarray per_store_arr;
819 static struct xarray *per_store_ap = &per_store_arr;
820 static int sdeb_first_idx = -1; /* invalid index ==> none created */
821 static int sdeb_most_recent_idx = -1;
822 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
824 static unsigned long map_size;
825 static int num_aborts;
826 static int num_dev_resets;
827 static int num_target_resets;
828 static int num_bus_resets;
829 static int num_host_resets;
830 static int dix_writes;
831 static int dix_reads;
832 static int dif_errors;
834 /* ZBC global data */
835 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
836 static int sdeb_zbc_zone_size_mb;
837 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
838 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
840 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
841 static int poll_queues; /* iouring iopoll interface.*/
842 static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
844 static DEFINE_RWLOCK(atomic_rw);
845 static DEFINE_RWLOCK(atomic_rw2);
847 static rwlock_t *ramdisk_lck_a[2];
849 static char sdebug_proc_name[] = MY_NAME;
850 static const char *my_name = MY_NAME;
852 static struct bus_type pseudo_lld_bus;
854 static struct device_driver sdebug_driverfs_driver = {
855 .name = sdebug_proc_name,
856 .bus = &pseudo_lld_bus,
859 static const int check_condition_result =
860 SAM_STAT_CHECK_CONDITION;
862 static const int illegal_condition_result =
863 (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
865 static const int device_qfull_result =
866 (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
868 static const int condition_met_result = SAM_STAT_CONDITION_MET;
871 /* Only do the extra work involved in logical block provisioning if one or
872 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
873 * real reads and writes (i.e. not skipping them for speed).
875 static inline bool scsi_debug_lbp(void)
877 return 0 == sdebug_fake_rw &&
878 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
881 static void *lba2fake_store(struct sdeb_store_info *sip,
882 unsigned long long lba)
884 struct sdeb_store_info *lsip = sip;
886 lba = do_div(lba, sdebug_store_sectors);
887 if (!sip || !sip->storep) {
889 lsip = xa_load(per_store_ap, 0); /* should never be NULL */
891 return lsip->storep + lba * sdebug_sector_size;
894 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
897 sector = sector_div(sector, sdebug_store_sectors);
899 return sip->dif_storep + sector;
902 static void sdebug_max_tgts_luns(void)
904 struct sdebug_host_info *sdbg_host;
905 struct Scsi_Host *hpnt;
907 spin_lock(&sdebug_host_list_lock);
908 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
909 hpnt = sdbg_host->shost;
910 if ((hpnt->this_id >= 0) &&
911 (sdebug_num_tgts > hpnt->this_id))
912 hpnt->max_id = sdebug_num_tgts + 1;
914 hpnt->max_id = sdebug_num_tgts;
915 /* sdebug_max_luns; */
916 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
918 spin_unlock(&sdebug_host_list_lock);
921 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
923 /* Set in_bit to -1 to indicate no bit position of invalid field */
924 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
925 enum sdeb_cmd_data c_d,
926 int in_byte, int in_bit)
928 unsigned char *sbuff;
932 sbuff = scp->sense_buffer;
934 sdev_printk(KERN_ERR, scp->device,
935 "%s: sense_buffer is NULL\n", __func__);
938 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
939 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
940 scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
941 memset(sks, 0, sizeof(sks));
947 sks[0] |= 0x7 & in_bit;
949 put_unaligned_be16(in_byte, sks + 1);
955 memcpy(sbuff + sl + 4, sks, 3);
957 memcpy(sbuff + 15, sks, 3);
959 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
960 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
961 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
964 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
966 if (!scp->sense_buffer) {
967 sdev_printk(KERN_ERR, scp->device,
968 "%s: sense_buffer is NULL\n", __func__);
971 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
973 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
976 sdev_printk(KERN_INFO, scp->device,
977 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
978 my_name, key, asc, asq);
981 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
983 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
986 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
989 if (sdebug_verbose) {
991 sdev_printk(KERN_INFO, dev,
992 "%s: BLKFLSBUF [0x1261]\n", __func__);
993 else if (0x5331 == cmd)
994 sdev_printk(KERN_INFO, dev,
995 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
998 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1002 /* return -ENOTTY; // correct return but upsets fdisk */
1005 static void config_cdb_len(struct scsi_device *sdev)
1007 switch (sdebug_cdb_len) {
1008 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1009 sdev->use_10_for_rw = false;
1010 sdev->use_16_for_rw = false;
1011 sdev->use_10_for_ms = false;
1013 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1014 sdev->use_10_for_rw = true;
1015 sdev->use_16_for_rw = false;
1016 sdev->use_10_for_ms = false;
1018 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1019 sdev->use_10_for_rw = true;
1020 sdev->use_16_for_rw = false;
1021 sdev->use_10_for_ms = true;
1024 sdev->use_10_for_rw = false;
1025 sdev->use_16_for_rw = true;
1026 sdev->use_10_for_ms = true;
1028 case 32: /* No knobs to suggest this so same as 16 for now */
1029 sdev->use_10_for_rw = false;
1030 sdev->use_16_for_rw = true;
1031 sdev->use_10_for_ms = true;
1034 pr_warn("unexpected cdb_len=%d, force to 10\n",
1036 sdev->use_10_for_rw = true;
1037 sdev->use_16_for_rw = false;
1038 sdev->use_10_for_ms = false;
1039 sdebug_cdb_len = 10;
1044 static void all_config_cdb_len(void)
1046 struct sdebug_host_info *sdbg_host;
1047 struct Scsi_Host *shost;
1048 struct scsi_device *sdev;
1050 spin_lock(&sdebug_host_list_lock);
1051 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1052 shost = sdbg_host->shost;
1053 shost_for_each_device(sdev, shost) {
1054 config_cdb_len(sdev);
1057 spin_unlock(&sdebug_host_list_lock);
1060 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1062 struct sdebug_host_info *sdhp;
1063 struct sdebug_dev_info *dp;
1065 spin_lock(&sdebug_host_list_lock);
1066 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1067 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1068 if ((devip->sdbg_host == dp->sdbg_host) &&
1069 (devip->target == dp->target))
1070 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1073 spin_unlock(&sdebug_host_list_lock);
1076 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1080 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1081 if (k != SDEBUG_NUM_UAS) {
1082 const char *cp = NULL;
1086 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1087 POWER_ON_RESET_ASCQ);
1089 cp = "power on reset";
1091 case SDEBUG_UA_POOCCUR:
1092 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1093 POWER_ON_OCCURRED_ASCQ);
1095 cp = "power on occurred";
1097 case SDEBUG_UA_BUS_RESET:
1098 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1103 case SDEBUG_UA_MODE_CHANGED:
1104 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1107 cp = "mode parameters changed";
1109 case SDEBUG_UA_CAPACITY_CHANGED:
1110 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1111 CAPACITY_CHANGED_ASCQ);
1113 cp = "capacity data changed";
1115 case SDEBUG_UA_MICROCODE_CHANGED:
1116 mk_sense_buffer(scp, UNIT_ATTENTION,
1118 MICROCODE_CHANGED_ASCQ);
1120 cp = "microcode has been changed";
1122 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1123 mk_sense_buffer(scp, UNIT_ATTENTION,
1125 MICROCODE_CHANGED_WO_RESET_ASCQ);
1127 cp = "microcode has been changed without reset";
1129 case SDEBUG_UA_LUNS_CHANGED:
1131 * SPC-3 behavior is to report a UNIT ATTENTION with
1132 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1133 * on the target, until a REPORT LUNS command is
1134 * received. SPC-4 behavior is to report it only once.
1135 * NOTE: sdebug_scsi_level does not use the same
1136 * values as struct scsi_device->scsi_level.
1138 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1139 clear_luns_changed_on_target(devip);
1140 mk_sense_buffer(scp, UNIT_ATTENTION,
1144 cp = "reported luns data has changed";
1147 pr_warn("unexpected unit attention code=%d\n", k);
1152 clear_bit(k, devip->uas_bm);
1154 sdev_printk(KERN_INFO, scp->device,
1155 "%s reports: Unit attention: %s\n",
1157 return check_condition_result;
1162 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1163 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1167 struct scsi_data_buffer *sdb = &scp->sdb;
1171 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1172 return DID_ERROR << 16;
1174 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1176 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1181 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1182 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1183 * calls, not required to write in ascending offset order. Assumes resid
1184 * set to scsi_bufflen() prior to any calls.
1186 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1187 int arr_len, unsigned int off_dst)
1189 unsigned int act_len, n;
1190 struct scsi_data_buffer *sdb = &scp->sdb;
1191 off_t skip = off_dst;
1193 if (sdb->length <= off_dst)
1195 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1196 return DID_ERROR << 16;
1198 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1199 arr, arr_len, skip);
1200 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1201 __func__, off_dst, scsi_bufflen(scp), act_len,
1202 scsi_get_resid(scp));
1203 n = scsi_bufflen(scp) - (off_dst + act_len);
1204 scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1208 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1209 * 'arr' or -1 if error.
1211 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1214 if (!scsi_bufflen(scp))
1216 if (scp->sc_data_direction != DMA_TO_DEVICE)
1219 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1223 static char sdebug_inq_vendor_id[9] = "Linux ";
1224 static char sdebug_inq_product_id[17] = "scsi_debug ";
1225 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1226 /* Use some locally assigned NAAs for SAS addresses. */
1227 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1228 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1229 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1231 /* Device identification VPD page. Returns number of bytes placed in arr */
1232 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1233 int target_dev_id, int dev_id_num,
1234 const char *dev_id_str, int dev_id_str_len,
1235 const uuid_t *lu_name)
1240 port_a = target_dev_id + 1;
1241 /* T10 vendor identifier field format (faked) */
1242 arr[0] = 0x2; /* ASCII */
1245 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1246 memcpy(&arr[12], sdebug_inq_product_id, 16);
1247 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1248 num = 8 + 16 + dev_id_str_len;
1251 if (dev_id_num >= 0) {
1252 if (sdebug_uuid_ctl) {
1253 /* Locally assigned UUID */
1254 arr[num++] = 0x1; /* binary (not necessarily sas) */
1255 arr[num++] = 0xa; /* PIV=0, lu, naa */
1258 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1260 memcpy(arr + num, lu_name, 16);
1263 /* NAA-3, Logical unit identifier (binary) */
1264 arr[num++] = 0x1; /* binary (not necessarily sas) */
1265 arr[num++] = 0x3; /* PIV=0, lu, naa */
1268 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1271 /* Target relative port number */
1272 arr[num++] = 0x61; /* proto=sas, binary */
1273 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1274 arr[num++] = 0x0; /* reserved */
1275 arr[num++] = 0x4; /* length */
1276 arr[num++] = 0x0; /* reserved */
1277 arr[num++] = 0x0; /* reserved */
1279 arr[num++] = 0x1; /* relative port A */
1281 /* NAA-3, Target port identifier */
1282 arr[num++] = 0x61; /* proto=sas, binary */
1283 arr[num++] = 0x93; /* piv=1, target port, naa */
1286 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1288 /* NAA-3, Target port group identifier */
1289 arr[num++] = 0x61; /* proto=sas, binary */
1290 arr[num++] = 0x95; /* piv=1, target port group id */
1295 put_unaligned_be16(port_group_id, arr + num);
1297 /* NAA-3, Target device identifier */
1298 arr[num++] = 0x61; /* proto=sas, binary */
1299 arr[num++] = 0xa3; /* piv=1, target device, naa */
1302 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1304 /* SCSI name string: Target device identifier */
1305 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1306 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1309 memcpy(arr + num, "naa.32222220", 12);
1311 snprintf(b, sizeof(b), "%08X", target_dev_id);
1312 memcpy(arr + num, b, 8);
1314 memset(arr + num, 0, 4);
1319 static unsigned char vpd84_data[] = {
1320 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1321 0x22,0x22,0x22,0x0,0xbb,0x1,
1322 0x22,0x22,0x22,0x0,0xbb,0x2,
1325 /* Software interface identification VPD page */
1326 static int inquiry_vpd_84(unsigned char *arr)
1328 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1329 return sizeof(vpd84_data);
1332 /* Management network addresses VPD page */
1333 static int inquiry_vpd_85(unsigned char *arr)
1336 const char *na1 = "https://www.kernel.org/config";
1337 const char *na2 = "http://www.kernel.org/log";
1340 arr[num++] = 0x1; /* lu, storage config */
1341 arr[num++] = 0x0; /* reserved */
1346 plen = ((plen / 4) + 1) * 4;
1347 arr[num++] = plen; /* length, null termianted, padded */
1348 memcpy(arr + num, na1, olen);
1349 memset(arr + num + olen, 0, plen - olen);
1352 arr[num++] = 0x4; /* lu, logging */
1353 arr[num++] = 0x0; /* reserved */
1358 plen = ((plen / 4) + 1) * 4;
1359 arr[num++] = plen; /* length, null terminated, padded */
1360 memcpy(arr + num, na2, olen);
1361 memset(arr + num + olen, 0, plen - olen);
1367 /* SCSI ports VPD page */
1368 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1373 port_a = target_dev_id + 1;
1374 port_b = port_a + 1;
1375 arr[num++] = 0x0; /* reserved */
1376 arr[num++] = 0x0; /* reserved */
1378 arr[num++] = 0x1; /* relative port 1 (primary) */
1379 memset(arr + num, 0, 6);
1382 arr[num++] = 12; /* length tp descriptor */
1383 /* naa-5 target port identifier (A) */
1384 arr[num++] = 0x61; /* proto=sas, binary */
1385 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1386 arr[num++] = 0x0; /* reserved */
1387 arr[num++] = 0x8; /* length */
1388 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1390 arr[num++] = 0x0; /* reserved */
1391 arr[num++] = 0x0; /* reserved */
1393 arr[num++] = 0x2; /* relative port 2 (secondary) */
1394 memset(arr + num, 0, 6);
1397 arr[num++] = 12; /* length tp descriptor */
1398 /* naa-5 target port identifier (B) */
1399 arr[num++] = 0x61; /* proto=sas, binary */
1400 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1401 arr[num++] = 0x0; /* reserved */
1402 arr[num++] = 0x8; /* length */
1403 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1410 static unsigned char vpd89_data[] = {
1411 /* from 4th byte */ 0,0,0,0,
1412 'l','i','n','u','x',' ',' ',' ',
1413 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1415 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1417 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1418 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1419 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1420 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1422 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1426 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1427 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1428 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1430 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1431 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1432 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1437 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1438 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1439 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1454 /* ATA Information VPD page */
1455 static int inquiry_vpd_89(unsigned char *arr)
1457 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1458 return sizeof(vpd89_data);
1462 static unsigned char vpdb0_data[] = {
1463 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1464 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1465 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1466 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1469 /* Block limits VPD page (SBC-3) */
1470 static int inquiry_vpd_b0(unsigned char *arr)
1474 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1476 /* Optimal transfer length granularity */
1477 if (sdebug_opt_xferlen_exp != 0 &&
1478 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1479 gran = 1 << sdebug_opt_xferlen_exp;
1481 gran = 1 << sdebug_physblk_exp;
1482 put_unaligned_be16(gran, arr + 2);
1484 /* Maximum Transfer Length */
1485 if (sdebug_store_sectors > 0x400)
1486 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1488 /* Optimal Transfer Length */
1489 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1492 /* Maximum Unmap LBA Count */
1493 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1495 /* Maximum Unmap Block Descriptor Count */
1496 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1499 /* Unmap Granularity Alignment */
1500 if (sdebug_unmap_alignment) {
1501 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1502 arr[28] |= 0x80; /* UGAVALID */
1505 /* Optimal Unmap Granularity */
1506 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1508 /* Maximum WRITE SAME Length */
1509 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1511 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1513 return sizeof(vpdb0_data);
1516 /* Block device characteristics VPD page (SBC-3) */
1517 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1519 memset(arr, 0, 0x3c);
1521 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1523 arr[3] = 5; /* less than 1.8" */
1524 if (devip->zmodel == BLK_ZONED_HA)
1525 arr[4] = 1 << 4; /* zoned field = 01b */
1530 /* Logical block provisioning VPD page (SBC-4) */
1531 static int inquiry_vpd_b2(unsigned char *arr)
1533 memset(arr, 0, 0x4);
1534 arr[0] = 0; /* threshold exponent */
1541 if (sdebug_lbprz && scsi_debug_lbp())
1542 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1543 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1544 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1545 /* threshold_percentage=0 */
1549 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1550 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1552 memset(arr, 0, 0x3c);
1553 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1555 * Set Optimal number of open sequential write preferred zones and
1556 * Optimal number of non-sequentially written sequential write
1557 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1558 * fields set to zero, apart from Max. number of open swrz_s field.
1560 put_unaligned_be32(0xffffffff, &arr[4]);
1561 put_unaligned_be32(0xffffffff, &arr[8]);
1562 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1563 put_unaligned_be32(devip->max_open, &arr[12]);
1565 put_unaligned_be32(0xffffffff, &arr[12]);
1569 #define SDEBUG_LONG_INQ_SZ 96
1570 #define SDEBUG_MAX_INQ_ARR_SZ 584
1572 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1574 unsigned char pq_pdt;
1576 unsigned char *cmd = scp->cmnd;
1579 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1581 alloc_len = get_unaligned_be16(cmd + 3);
1582 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1584 return DID_REQUEUE << 16;
1585 is_disk = (sdebug_ptype == TYPE_DISK);
1586 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1587 is_disk_zbc = (is_disk || is_zbc);
1588 have_wlun = scsi_is_wlun(scp->device->lun);
1590 pq_pdt = TYPE_WLUN; /* present, wlun */
1591 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1592 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1594 pq_pdt = (sdebug_ptype & 0x1f);
1596 if (0x2 & cmd[1]) { /* CMDDT bit set */
1597 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1599 return check_condition_result;
1600 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1601 int lu_id_num, port_group_id, target_dev_id;
1604 int host_no = devip->sdbg_host->shost->host_no;
1606 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1607 (devip->channel & 0x7f);
1608 if (sdebug_vpd_use_hostno == 0)
1610 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1611 (devip->target * 1000) + devip->lun);
1612 target_dev_id = ((host_no + 1) * 2000) +
1613 (devip->target * 1000) - 3;
1614 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1615 if (0 == cmd[2]) { /* supported vital product data pages */
1616 arr[1] = cmd[2]; /*sanity */
1618 arr[n++] = 0x0; /* this page */
1619 arr[n++] = 0x80; /* unit serial number */
1620 arr[n++] = 0x83; /* device identification */
1621 arr[n++] = 0x84; /* software interface ident. */
1622 arr[n++] = 0x85; /* management network addresses */
1623 arr[n++] = 0x86; /* extended inquiry */
1624 arr[n++] = 0x87; /* mode page policy */
1625 arr[n++] = 0x88; /* SCSI ports */
1626 if (is_disk_zbc) { /* SBC or ZBC */
1627 arr[n++] = 0x89; /* ATA information */
1628 arr[n++] = 0xb0; /* Block limits */
1629 arr[n++] = 0xb1; /* Block characteristics */
1631 arr[n++] = 0xb2; /* LB Provisioning */
1633 arr[n++] = 0xb6; /* ZB dev. char. */
1635 arr[3] = n - 4; /* number of supported VPD pages */
1636 } else if (0x80 == cmd[2]) { /* unit serial number */
1637 arr[1] = cmd[2]; /*sanity */
1639 memcpy(&arr[4], lu_id_str, len);
1640 } else if (0x83 == cmd[2]) { /* device identification */
1641 arr[1] = cmd[2]; /*sanity */
1642 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1643 target_dev_id, lu_id_num,
1646 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1647 arr[1] = cmd[2]; /*sanity */
1648 arr[3] = inquiry_vpd_84(&arr[4]);
1649 } else if (0x85 == cmd[2]) { /* Management network addresses */
1650 arr[1] = cmd[2]; /*sanity */
1651 arr[3] = inquiry_vpd_85(&arr[4]);
1652 } else if (0x86 == cmd[2]) { /* extended inquiry */
1653 arr[1] = cmd[2]; /*sanity */
1654 arr[3] = 0x3c; /* number of following entries */
1655 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1656 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1657 else if (have_dif_prot)
1658 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1660 arr[4] = 0x0; /* no protection stuff */
1661 arr[5] = 0x7; /* head of q, ordered + simple q's */
1662 } else if (0x87 == cmd[2]) { /* mode page policy */
1663 arr[1] = cmd[2]; /*sanity */
1664 arr[3] = 0x8; /* number of following entries */
1665 arr[4] = 0x2; /* disconnect-reconnect mp */
1666 arr[6] = 0x80; /* mlus, shared */
1667 arr[8] = 0x18; /* protocol specific lu */
1668 arr[10] = 0x82; /* mlus, per initiator port */
1669 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1670 arr[1] = cmd[2]; /*sanity */
1671 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1672 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1673 arr[1] = cmd[2]; /*sanity */
1674 n = inquiry_vpd_89(&arr[4]);
1675 put_unaligned_be16(n, arr + 2);
1676 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1677 arr[1] = cmd[2]; /*sanity */
1678 arr[3] = inquiry_vpd_b0(&arr[4]);
1679 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1680 arr[1] = cmd[2]; /*sanity */
1681 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1682 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1683 arr[1] = cmd[2]; /*sanity */
1684 arr[3] = inquiry_vpd_b2(&arr[4]);
1685 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1686 arr[1] = cmd[2]; /*sanity */
1687 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1689 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1691 return check_condition_result;
1693 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1694 ret = fill_from_dev_buffer(scp, arr,
1695 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1699 /* drops through here for a standard inquiry */
1700 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1701 arr[2] = sdebug_scsi_level;
1702 arr[3] = 2; /* response_data_format==2 */
1703 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1704 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1705 if (sdebug_vpd_use_hostno == 0)
1706 arr[5] |= 0x10; /* claim: implicit TPGS */
1707 arr[6] = 0x10; /* claim: MultiP */
1708 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1709 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1710 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1711 memcpy(&arr[16], sdebug_inq_product_id, 16);
1712 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1713 /* Use Vendor Specific area to place driver date in ASCII hex */
1714 memcpy(&arr[36], sdebug_version_date, 8);
1715 /* version descriptors (2 bytes each) follow */
1716 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1717 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1719 if (is_disk) { /* SBC-4 no version claimed */
1720 put_unaligned_be16(0x600, arr + n);
1722 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1723 put_unaligned_be16(0x525, arr + n);
1725 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1726 put_unaligned_be16(0x624, arr + n);
1729 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1730 ret = fill_from_dev_buffer(scp, arr,
1731 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1736 /* See resp_iec_m_pg() for how this data is manipulated */
1737 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1740 static int resp_requests(struct scsi_cmnd *scp,
1741 struct sdebug_dev_info *devip)
1743 unsigned char *cmd = scp->cmnd;
1744 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1745 bool dsense = !!(cmd[1] & 1);
1746 u32 alloc_len = cmd[4];
1748 int stopped_state = atomic_read(&devip->stopped);
1750 memset(arr, 0, sizeof(arr));
1751 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1755 arr[2] = LOGICAL_UNIT_NOT_READY;
1756 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1760 arr[2] = NOT_READY; /* NO_SENSE in sense_key */
1761 arr[7] = 0xa; /* 18 byte sense buffer */
1762 arr[12] = LOGICAL_UNIT_NOT_READY;
1763 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1765 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1766 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1769 arr[1] = 0x0; /* NO_SENSE in sense_key */
1770 arr[2] = THRESHOLD_EXCEEDED;
1771 arr[3] = 0xff; /* Failure prediction(false) */
1775 arr[2] = 0x0; /* NO_SENSE in sense_key */
1776 arr[7] = 0xa; /* 18 byte sense buffer */
1777 arr[12] = THRESHOLD_EXCEEDED;
1778 arr[13] = 0xff; /* Failure prediction(false) */
1780 } else { /* nothing to report */
1783 memset(arr, 0, len);
1786 memset(arr, 0, len);
1791 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1794 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1796 unsigned char *cmd = scp->cmnd;
1797 int power_cond, want_stop, stopped_state;
1800 power_cond = (cmd[4] & 0xf0) >> 4;
1802 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1803 return check_condition_result;
1805 want_stop = !(cmd[4] & 1);
1806 stopped_state = atomic_read(&devip->stopped);
1807 if (stopped_state == 2) {
1808 ktime_t now_ts = ktime_get_boottime();
1810 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1811 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1813 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1814 /* tur_ms_to_ready timer extinguished */
1815 atomic_set(&devip->stopped, 0);
1819 if (stopped_state == 2) {
1821 stopped_state = 1; /* dummy up success */
1822 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1823 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1824 return check_condition_result;
1828 changing = (stopped_state != want_stop);
1830 atomic_xchg(&devip->stopped, want_stop);
1831 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1832 return SDEG_RES_IMMED_MASK;
1837 static sector_t get_sdebug_capacity(void)
1839 static const unsigned int gibibyte = 1073741824;
1841 if (sdebug_virtual_gb > 0)
1842 return (sector_t)sdebug_virtual_gb *
1843 (gibibyte / sdebug_sector_size);
1845 return sdebug_store_sectors;
1848 #define SDEBUG_READCAP_ARR_SZ 8
1849 static int resp_readcap(struct scsi_cmnd *scp,
1850 struct sdebug_dev_info *devip)
1852 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1855 /* following just in case virtual_gb changed */
1856 sdebug_capacity = get_sdebug_capacity();
1857 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1858 if (sdebug_capacity < 0xffffffff) {
1859 capac = (unsigned int)sdebug_capacity - 1;
1860 put_unaligned_be32(capac, arr + 0);
1862 put_unaligned_be32(0xffffffff, arr + 0);
1863 put_unaligned_be16(sdebug_sector_size, arr + 6);
1864 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1867 #define SDEBUG_READCAP16_ARR_SZ 32
1868 static int resp_readcap16(struct scsi_cmnd *scp,
1869 struct sdebug_dev_info *devip)
1871 unsigned char *cmd = scp->cmnd;
1872 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1875 alloc_len = get_unaligned_be32(cmd + 10);
1876 /* following just in case virtual_gb changed */
1877 sdebug_capacity = get_sdebug_capacity();
1878 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1879 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1880 put_unaligned_be32(sdebug_sector_size, arr + 8);
1881 arr[13] = sdebug_physblk_exp & 0xf;
1882 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1884 if (scsi_debug_lbp()) {
1885 arr[14] |= 0x80; /* LBPME */
1886 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1887 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1888 * in the wider field maps to 0 in this field.
1890 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1894 arr[15] = sdebug_lowest_aligned & 0xff;
1896 if (have_dif_prot) {
1897 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1898 arr[12] |= 1; /* PROT_EN */
1901 return fill_from_dev_buffer(scp, arr,
1902 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1905 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1907 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1908 struct sdebug_dev_info *devip)
1910 unsigned char *cmd = scp->cmnd;
1912 int host_no = devip->sdbg_host->shost->host_no;
1913 int port_group_a, port_group_b, port_a, port_b;
1917 alen = get_unaligned_be32(cmd + 6);
1918 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1920 return DID_REQUEUE << 16;
1922 * EVPD page 0x88 states we have two ports, one
1923 * real and a fake port with no device connected.
1924 * So we create two port groups with one port each
1925 * and set the group with port B to unavailable.
1927 port_a = 0x1; /* relative port A */
1928 port_b = 0x2; /* relative port B */
1929 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1930 (devip->channel & 0x7f);
1931 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1932 (devip->channel & 0x7f) + 0x80;
1935 * The asymmetric access state is cycled according to the host_id.
1938 if (sdebug_vpd_use_hostno == 0) {
1939 arr[n++] = host_no % 3; /* Asymm access state */
1940 arr[n++] = 0x0F; /* claim: all states are supported */
1942 arr[n++] = 0x0; /* Active/Optimized path */
1943 arr[n++] = 0x01; /* only support active/optimized paths */
1945 put_unaligned_be16(port_group_a, arr + n);
1947 arr[n++] = 0; /* Reserved */
1948 arr[n++] = 0; /* Status code */
1949 arr[n++] = 0; /* Vendor unique */
1950 arr[n++] = 0x1; /* One port per group */
1951 arr[n++] = 0; /* Reserved */
1952 arr[n++] = 0; /* Reserved */
1953 put_unaligned_be16(port_a, arr + n);
1955 arr[n++] = 3; /* Port unavailable */
1956 arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1957 put_unaligned_be16(port_group_b, arr + n);
1959 arr[n++] = 0; /* Reserved */
1960 arr[n++] = 0; /* Status code */
1961 arr[n++] = 0; /* Vendor unique */
1962 arr[n++] = 0x1; /* One port per group */
1963 arr[n++] = 0; /* Reserved */
1964 arr[n++] = 0; /* Reserved */
1965 put_unaligned_be16(port_b, arr + n);
1969 put_unaligned_be32(rlen, arr + 0);
1972 * Return the smallest value of either
1973 * - The allocated length
1974 * - The constructed command length
1975 * - The maximum array size
1977 rlen = min(alen, n);
1978 ret = fill_from_dev_buffer(scp, arr,
1979 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1984 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1985 struct sdebug_dev_info *devip)
1988 u8 reporting_opts, req_opcode, sdeb_i, supp;
1990 u32 alloc_len, a_len;
1991 int k, offset, len, errsts, count, bump, na;
1992 const struct opcode_info_t *oip;
1993 const struct opcode_info_t *r_oip;
1995 u8 *cmd = scp->cmnd;
1997 rctd = !!(cmd[2] & 0x80);
1998 reporting_opts = cmd[2] & 0x7;
1999 req_opcode = cmd[3];
2000 req_sa = get_unaligned_be16(cmd + 4);
2001 alloc_len = get_unaligned_be32(cmd + 6);
2002 if (alloc_len < 4 || alloc_len > 0xffff) {
2003 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2004 return check_condition_result;
2006 if (alloc_len > 8192)
2010 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2012 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2014 return check_condition_result;
2016 switch (reporting_opts) {
2017 case 0: /* all commands */
2018 /* count number of commands */
2019 for (count = 0, oip = opcode_info_arr;
2020 oip->num_attached != 0xff; ++oip) {
2021 if (F_INV_OP & oip->flags)
2023 count += (oip->num_attached + 1);
2025 bump = rctd ? 20 : 8;
2026 put_unaligned_be32(count * bump, arr);
2027 for (offset = 4, oip = opcode_info_arr;
2028 oip->num_attached != 0xff && offset < a_len; ++oip) {
2029 if (F_INV_OP & oip->flags)
2031 na = oip->num_attached;
2032 arr[offset] = oip->opcode;
2033 put_unaligned_be16(oip->sa, arr + offset + 2);
2035 arr[offset + 5] |= 0x2;
2036 if (FF_SA & oip->flags)
2037 arr[offset + 5] |= 0x1;
2038 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2040 put_unaligned_be16(0xa, arr + offset + 8);
2042 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2043 if (F_INV_OP & oip->flags)
2046 arr[offset] = oip->opcode;
2047 put_unaligned_be16(oip->sa, arr + offset + 2);
2049 arr[offset + 5] |= 0x2;
2050 if (FF_SA & oip->flags)
2051 arr[offset + 5] |= 0x1;
2052 put_unaligned_be16(oip->len_mask[0],
2055 put_unaligned_be16(0xa,
2062 case 1: /* one command: opcode only */
2063 case 2: /* one command: opcode plus service action */
2064 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2065 sdeb_i = opcode_ind_arr[req_opcode];
2066 oip = &opcode_info_arr[sdeb_i];
2067 if (F_INV_OP & oip->flags) {
2071 if (1 == reporting_opts) {
2072 if (FF_SA & oip->flags) {
2073 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2076 return check_condition_result;
2079 } else if (2 == reporting_opts &&
2080 0 == (FF_SA & oip->flags)) {
2081 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2082 kfree(arr); /* point at requested sa */
2083 return check_condition_result;
2085 if (0 == (FF_SA & oip->flags) &&
2086 req_opcode == oip->opcode)
2088 else if (0 == (FF_SA & oip->flags)) {
2089 na = oip->num_attached;
2090 for (k = 0, oip = oip->arrp; k < na;
2092 if (req_opcode == oip->opcode)
2095 supp = (k >= na) ? 1 : 3;
2096 } else if (req_sa != oip->sa) {
2097 na = oip->num_attached;
2098 for (k = 0, oip = oip->arrp; k < na;
2100 if (req_sa == oip->sa)
2103 supp = (k >= na) ? 1 : 3;
2107 u = oip->len_mask[0];
2108 put_unaligned_be16(u, arr + 2);
2109 arr[4] = oip->opcode;
2110 for (k = 1; k < u; ++k)
2111 arr[4 + k] = (k < 16) ?
2112 oip->len_mask[k] : 0xff;
2117 arr[1] = (rctd ? 0x80 : 0) | supp;
2119 put_unaligned_be16(0xa, arr + offset);
2124 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2126 return check_condition_result;
2128 offset = (offset < a_len) ? offset : a_len;
2129 len = (offset < alloc_len) ? offset : alloc_len;
2130 errsts = fill_from_dev_buffer(scp, arr, len);
2135 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2136 struct sdebug_dev_info *devip)
2141 u8 *cmd = scp->cmnd;
2143 memset(arr, 0, sizeof(arr));
2144 repd = !!(cmd[2] & 0x80);
2145 alloc_len = get_unaligned_be32(cmd + 6);
2146 if (alloc_len < 4) {
2147 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2148 return check_condition_result;
2150 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2151 arr[1] = 0x1; /* ITNRS */
2158 len = (len < alloc_len) ? len : alloc_len;
2159 return fill_from_dev_buffer(scp, arr, len);
2162 /* <<Following mode page info copied from ST318451LW>> */
2164 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2165 { /* Read-Write Error Recovery page for mode_sense */
2166 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2169 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2171 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2172 return sizeof(err_recov_pg);
2175 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2176 { /* Disconnect-Reconnect page for mode_sense */
2177 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2178 0, 0, 0, 0, 0, 0, 0, 0};
2180 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2182 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2183 return sizeof(disconnect_pg);
2186 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2187 { /* Format device page for mode_sense */
2188 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2189 0, 0, 0, 0, 0, 0, 0, 0,
2190 0, 0, 0, 0, 0x40, 0, 0, 0};
2192 memcpy(p, format_pg, sizeof(format_pg));
2193 put_unaligned_be16(sdebug_sectors_per, p + 10);
2194 put_unaligned_be16(sdebug_sector_size, p + 12);
2195 if (sdebug_removable)
2196 p[20] |= 0x20; /* should agree with INQUIRY */
2198 memset(p + 2, 0, sizeof(format_pg) - 2);
2199 return sizeof(format_pg);
2202 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2203 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2206 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2207 { /* Caching page for mode_sense */
2208 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2209 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2210 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2211 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2213 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2214 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2215 memcpy(p, caching_pg, sizeof(caching_pg));
2217 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2218 else if (2 == pcontrol)
2219 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2220 return sizeof(caching_pg);
2223 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2226 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2227 { /* Control mode page for mode_sense */
2228 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2230 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2234 ctrl_m_pg[2] |= 0x4;
2236 ctrl_m_pg[2] &= ~0x4;
2239 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2241 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2243 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2244 else if (2 == pcontrol)
2245 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2246 return sizeof(ctrl_m_pg);
2250 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2251 { /* Informational Exceptions control mode page for mode_sense */
2252 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2254 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2257 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2259 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2260 else if (2 == pcontrol)
2261 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2262 return sizeof(iec_m_pg);
2265 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2266 { /* SAS SSP mode page - short format for mode_sense */
2267 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2268 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2270 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2272 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2273 return sizeof(sas_sf_m_pg);
2277 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2279 { /* SAS phy control and discover mode page for mode_sense */
2280 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2281 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2282 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2283 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2284 0x2, 0, 0, 0, 0, 0, 0, 0,
2285 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2286 0, 0, 0, 0, 0, 0, 0, 0,
2287 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2288 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2289 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2290 0x3, 0, 0, 0, 0, 0, 0, 0,
2291 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2292 0, 0, 0, 0, 0, 0, 0, 0,
2296 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2297 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2298 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2299 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2300 port_a = target_dev_id + 1;
2301 port_b = port_a + 1;
2302 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2303 put_unaligned_be32(port_a, p + 20);
2304 put_unaligned_be32(port_b, p + 48 + 20);
2306 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2307 return sizeof(sas_pcd_m_pg);
2310 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2311 { /* SAS SSP shared protocol specific port mode subpage */
2312 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2313 0, 0, 0, 0, 0, 0, 0, 0,
2316 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2318 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2319 return sizeof(sas_sha_m_pg);
2322 #define SDEBUG_MAX_MSENSE_SZ 256
2324 static int resp_mode_sense(struct scsi_cmnd *scp,
2325 struct sdebug_dev_info *devip)
2327 int pcontrol, pcode, subpcode, bd_len;
2328 unsigned char dev_spec;
2329 u32 alloc_len, offset, len;
2331 int target = scp->device->id;
2333 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2334 unsigned char *cmd = scp->cmnd;
2335 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2337 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2338 pcontrol = (cmd[2] & 0xc0) >> 6;
2339 pcode = cmd[2] & 0x3f;
2341 msense_6 = (MODE_SENSE == cmd[0]);
2342 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2343 is_disk = (sdebug_ptype == TYPE_DISK);
2344 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2345 if ((is_disk || is_zbc) && !dbd)
2346 bd_len = llbaa ? 16 : 8;
2349 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2350 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2351 if (0x3 == pcontrol) { /* Saving values not supported */
2352 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2353 return check_condition_result;
2355 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2356 (devip->target * 1000) - 3;
2357 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2358 if (is_disk || is_zbc) {
2359 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2371 arr[4] = 0x1; /* set LONGLBA bit */
2372 arr[7] = bd_len; /* assume 255 or less */
2376 if ((bd_len > 0) && (!sdebug_capacity))
2377 sdebug_capacity = get_sdebug_capacity();
2380 if (sdebug_capacity > 0xfffffffe)
2381 put_unaligned_be32(0xffffffff, ap + 0);
2383 put_unaligned_be32(sdebug_capacity, ap + 0);
2384 put_unaligned_be16(sdebug_sector_size, ap + 6);
2387 } else if (16 == bd_len) {
2388 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2389 put_unaligned_be32(sdebug_sector_size, ap + 12);
2394 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2395 /* TODO: Control Extension page */
2396 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2397 return check_condition_result;
2402 case 0x1: /* Read-Write error recovery page, direct access */
2403 len = resp_err_recov_pg(ap, pcontrol, target);
2406 case 0x2: /* Disconnect-Reconnect page, all devices */
2407 len = resp_disconnect_pg(ap, pcontrol, target);
2410 case 0x3: /* Format device page, direct access */
2412 len = resp_format_pg(ap, pcontrol, target);
2417 case 0x8: /* Caching page, direct access */
2418 if (is_disk || is_zbc) {
2419 len = resp_caching_pg(ap, pcontrol, target);
2424 case 0xa: /* Control Mode page, all devices */
2425 len = resp_ctrl_m_pg(ap, pcontrol, target);
2428 case 0x19: /* if spc==1 then sas phy, control+discover */
2429 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2430 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2431 return check_condition_result;
2434 if ((0x0 == subpcode) || (0xff == subpcode))
2435 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2436 if ((0x1 == subpcode) || (0xff == subpcode))
2437 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2439 if ((0x2 == subpcode) || (0xff == subpcode))
2440 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2443 case 0x1c: /* Informational Exceptions Mode page, all devices */
2444 len = resp_iec_m_pg(ap, pcontrol, target);
2447 case 0x3f: /* Read all Mode pages */
2448 if ((0 == subpcode) || (0xff == subpcode)) {
2449 len = resp_err_recov_pg(ap, pcontrol, target);
2450 len += resp_disconnect_pg(ap + len, pcontrol, target);
2452 len += resp_format_pg(ap + len, pcontrol,
2454 len += resp_caching_pg(ap + len, pcontrol,
2456 } else if (is_zbc) {
2457 len += resp_caching_pg(ap + len, pcontrol,
2460 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2461 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2462 if (0xff == subpcode) {
2463 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2464 target, target_dev_id);
2465 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2467 len += resp_iec_m_pg(ap + len, pcontrol, target);
2470 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2471 return check_condition_result;
2479 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2480 return check_condition_result;
2483 arr[0] = offset - 1;
2485 put_unaligned_be16((offset - 2), arr + 0);
2486 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2489 #define SDEBUG_MAX_MSELECT_SZ 512
2491 static int resp_mode_select(struct scsi_cmnd *scp,
2492 struct sdebug_dev_info *devip)
2494 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2495 int param_len, res, mpage;
2496 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2497 unsigned char *cmd = scp->cmnd;
2498 int mselect6 = (MODE_SELECT == cmd[0]);
2500 memset(arr, 0, sizeof(arr));
2503 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2504 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2505 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2506 return check_condition_result;
2508 res = fetch_to_dev_buffer(scp, arr, param_len);
2510 return DID_ERROR << 16;
2511 else if (sdebug_verbose && (res < param_len))
2512 sdev_printk(KERN_INFO, scp->device,
2513 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2514 __func__, param_len, res);
2515 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2516 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2517 off = bd_len + (mselect6 ? 4 : 8);
2518 if (md_len > 2 || off >= res) {
2519 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2520 return check_condition_result;
2522 mpage = arr[off] & 0x3f;
2523 ps = !!(arr[off] & 0x80);
2525 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2526 return check_condition_result;
2528 spf = !!(arr[off] & 0x40);
2529 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2531 if ((pg_len + off) > param_len) {
2532 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2533 PARAMETER_LIST_LENGTH_ERR, 0);
2534 return check_condition_result;
2537 case 0x8: /* Caching Mode page */
2538 if (caching_pg[1] == arr[off + 1]) {
2539 memcpy(caching_pg + 2, arr + off + 2,
2540 sizeof(caching_pg) - 2);
2541 goto set_mode_changed_ua;
2544 case 0xa: /* Control Mode page */
2545 if (ctrl_m_pg[1] == arr[off + 1]) {
2546 memcpy(ctrl_m_pg + 2, arr + off + 2,
2547 sizeof(ctrl_m_pg) - 2);
2548 if (ctrl_m_pg[4] & 0x8)
2552 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2553 goto set_mode_changed_ua;
2556 case 0x1c: /* Informational Exceptions Mode page */
2557 if (iec_m_pg[1] == arr[off + 1]) {
2558 memcpy(iec_m_pg + 2, arr + off + 2,
2559 sizeof(iec_m_pg) - 2);
2560 goto set_mode_changed_ua;
2566 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2567 return check_condition_result;
2568 set_mode_changed_ua:
2569 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2573 static int resp_temp_l_pg(unsigned char *arr)
2575 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2576 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2579 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2580 return sizeof(temp_l_pg);
2583 static int resp_ie_l_pg(unsigned char *arr)
2585 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2588 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2589 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2590 arr[4] = THRESHOLD_EXCEEDED;
2593 return sizeof(ie_l_pg);
2596 static int resp_env_rep_l_spg(unsigned char *arr)
2598 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2599 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2600 0x1, 0x0, 0x23, 0x8,
2601 0x0, 55, 72, 35, 55, 45, 0, 0,
2604 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2605 return sizeof(env_rep_l_spg);
2608 #define SDEBUG_MAX_LSENSE_SZ 512
2610 static int resp_log_sense(struct scsi_cmnd *scp,
2611 struct sdebug_dev_info *devip)
2613 int ppc, sp, pcode, subpcode;
2614 u32 alloc_len, len, n;
2615 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2616 unsigned char *cmd = scp->cmnd;
2618 memset(arr, 0, sizeof(arr));
2622 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2623 return check_condition_result;
2625 pcode = cmd[2] & 0x3f;
2626 subpcode = cmd[3] & 0xff;
2627 alloc_len = get_unaligned_be16(cmd + 7);
2629 if (0 == subpcode) {
2631 case 0x0: /* Supported log pages log page */
2633 arr[n++] = 0x0; /* this page */
2634 arr[n++] = 0xd; /* Temperature */
2635 arr[n++] = 0x2f; /* Informational exceptions */
2638 case 0xd: /* Temperature log page */
2639 arr[3] = resp_temp_l_pg(arr + 4);
2641 case 0x2f: /* Informational exceptions log page */
2642 arr[3] = resp_ie_l_pg(arr + 4);
2645 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2646 return check_condition_result;
2648 } else if (0xff == subpcode) {
2652 case 0x0: /* Supported log pages and subpages log page */
2655 arr[n++] = 0x0; /* 0,0 page */
2657 arr[n++] = 0xff; /* this page */
2659 arr[n++] = 0x0; /* Temperature */
2661 arr[n++] = 0x1; /* Environment reporting */
2663 arr[n++] = 0xff; /* all 0xd subpages */
2665 arr[n++] = 0x0; /* Informational exceptions */
2667 arr[n++] = 0xff; /* all 0x2f subpages */
2670 case 0xd: /* Temperature subpages */
2673 arr[n++] = 0x0; /* Temperature */
2675 arr[n++] = 0x1; /* Environment reporting */
2677 arr[n++] = 0xff; /* these subpages */
2680 case 0x2f: /* Informational exceptions subpages */
2683 arr[n++] = 0x0; /* Informational exceptions */
2685 arr[n++] = 0xff; /* these subpages */
2689 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2690 return check_condition_result;
2692 } else if (subpcode > 0) {
2695 if (pcode == 0xd && subpcode == 1)
2696 arr[3] = resp_env_rep_l_spg(arr + 4);
2698 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2699 return check_condition_result;
2702 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2703 return check_condition_result;
2705 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2706 return fill_from_dev_buffer(scp, arr,
2707 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2710 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2712 return devip->nr_zones != 0;
2715 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2716 unsigned long long lba)
2718 return &devip->zstate[lba >> devip->zsize_shift];
2721 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2723 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2726 static void zbc_close_zone(struct sdebug_dev_info *devip,
2727 struct sdeb_zone_state *zsp)
2729 enum sdebug_z_cond zc;
2731 if (zbc_zone_is_conv(zsp))
2735 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2738 if (zc == ZC2_IMPLICIT_OPEN)
2739 devip->nr_imp_open--;
2741 devip->nr_exp_open--;
2743 if (zsp->z_wp == zsp->z_start) {
2744 zsp->z_cond = ZC1_EMPTY;
2746 zsp->z_cond = ZC4_CLOSED;
2751 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2753 struct sdeb_zone_state *zsp = &devip->zstate[0];
2756 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2757 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2758 zbc_close_zone(devip, zsp);
2764 static void zbc_open_zone(struct sdebug_dev_info *devip,
2765 struct sdeb_zone_state *zsp, bool explicit)
2767 enum sdebug_z_cond zc;
2769 if (zbc_zone_is_conv(zsp))
2773 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2774 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2777 /* Close an implicit open zone if necessary */
2778 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2779 zbc_close_zone(devip, zsp);
2780 else if (devip->max_open &&
2781 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2782 zbc_close_imp_open_zone(devip);
2784 if (zsp->z_cond == ZC4_CLOSED)
2787 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2788 devip->nr_exp_open++;
2790 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2791 devip->nr_imp_open++;
2795 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2796 unsigned long long lba, unsigned int num)
2798 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2799 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2801 if (zbc_zone_is_conv(zsp))
2804 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2806 if (zsp->z_wp >= zend)
2807 zsp->z_cond = ZC5_FULL;
2812 if (lba != zsp->z_wp)
2813 zsp->z_non_seq_resource = true;
2819 } else if (end > zsp->z_wp) {
2825 if (zsp->z_wp >= zend)
2826 zsp->z_cond = ZC5_FULL;
2832 zend = zsp->z_start + zsp->z_size;
2837 static int check_zbc_access_params(struct scsi_cmnd *scp,
2838 unsigned long long lba, unsigned int num, bool write)
2840 struct scsi_device *sdp = scp->device;
2841 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2842 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2843 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2846 if (devip->zmodel == BLK_ZONED_HA)
2848 /* For host-managed, reads cannot cross zone types boundaries */
2849 if (zsp_end != zsp &&
2850 zbc_zone_is_conv(zsp) &&
2851 !zbc_zone_is_conv(zsp_end)) {
2852 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2855 return check_condition_result;
2860 /* No restrictions for writes within conventional zones */
2861 if (zbc_zone_is_conv(zsp)) {
2862 if (!zbc_zone_is_conv(zsp_end)) {
2863 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2865 WRITE_BOUNDARY_ASCQ);
2866 return check_condition_result;
2871 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2872 /* Writes cannot cross sequential zone boundaries */
2873 if (zsp_end != zsp) {
2874 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2876 WRITE_BOUNDARY_ASCQ);
2877 return check_condition_result;
2879 /* Cannot write full zones */
2880 if (zsp->z_cond == ZC5_FULL) {
2881 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2882 INVALID_FIELD_IN_CDB, 0);
2883 return check_condition_result;
2885 /* Writes must be aligned to the zone WP */
2886 if (lba != zsp->z_wp) {
2887 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2889 UNALIGNED_WRITE_ASCQ);
2890 return check_condition_result;
2894 /* Handle implicit open of closed and empty zones */
2895 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2896 if (devip->max_open &&
2897 devip->nr_exp_open >= devip->max_open) {
2898 mk_sense_buffer(scp, DATA_PROTECT,
2901 return check_condition_result;
2903 zbc_open_zone(devip, zsp, false);
2909 static inline int check_device_access_params
2910 (struct scsi_cmnd *scp, unsigned long long lba,
2911 unsigned int num, bool write)
2913 struct scsi_device *sdp = scp->device;
2914 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2916 if (lba + num > sdebug_capacity) {
2917 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2918 return check_condition_result;
2920 /* transfer length excessive (tie in to block limits VPD page) */
2921 if (num > sdebug_store_sectors) {
2922 /* needs work to find which cdb byte 'num' comes from */
2923 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2924 return check_condition_result;
2926 if (write && unlikely(sdebug_wp)) {
2927 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2928 return check_condition_result;
2930 if (sdebug_dev_is_zoned(devip))
2931 return check_zbc_access_params(scp, lba, num, write);
2937 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2938 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2939 * that access any of the "stores" in struct sdeb_store_info should call this
2940 * function with bug_if_fake_rw set to true.
2942 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2943 bool bug_if_fake_rw)
2945 if (sdebug_fake_rw) {
2946 BUG_ON(bug_if_fake_rw); /* See note above */
2949 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2952 /* Returns number of bytes copied or -1 if error. */
2953 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2954 u32 sg_skip, u64 lba, u32 num, bool do_write)
2957 u64 block, rest = 0;
2958 enum dma_data_direction dir;
2959 struct scsi_data_buffer *sdb = &scp->sdb;
2963 dir = DMA_TO_DEVICE;
2964 write_since_sync = true;
2966 dir = DMA_FROM_DEVICE;
2969 if (!sdb->length || !sip)
2971 if (scp->sc_data_direction != dir)
2975 block = do_div(lba, sdebug_store_sectors);
2976 if (block + num > sdebug_store_sectors)
2977 rest = block + num - sdebug_store_sectors;
2979 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2980 fsp + (block * sdebug_sector_size),
2981 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2982 if (ret != (num - rest) * sdebug_sector_size)
2986 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2987 fsp, rest * sdebug_sector_size,
2988 sg_skip + ((num - rest) * sdebug_sector_size),
2995 /* Returns number of bytes copied or -1 if error. */
2996 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2998 struct scsi_data_buffer *sdb = &scp->sdb;
3002 if (scp->sc_data_direction != DMA_TO_DEVICE)
3004 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3005 num * sdebug_sector_size, 0, true);
3008 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3009 * arr into sip->storep+lba and return true. If comparison fails then
3011 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3012 const u8 *arr, bool compare_only)
3015 u64 block, rest = 0;
3016 u32 store_blks = sdebug_store_sectors;
3017 u32 lb_size = sdebug_sector_size;
3018 u8 *fsp = sip->storep;
3020 block = do_div(lba, store_blks);
3021 if (block + num > store_blks)
3022 rest = block + num - store_blks;
3024 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3028 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3034 arr += num * lb_size;
3035 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3037 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3041 static __be16 dif_compute_csum(const void *buf, int len)
3046 csum = (__force __be16)ip_compute_csum(buf, len);
3048 csum = cpu_to_be16(crc_t10dif(buf, len));
3053 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3054 sector_t sector, u32 ei_lba)
3056 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3058 if (sdt->guard_tag != csum) {
3059 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3060 (unsigned long)sector,
3061 be16_to_cpu(sdt->guard_tag),
3065 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3066 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3067 pr_err("REF check failed on sector %lu\n",
3068 (unsigned long)sector);
3071 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3072 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3073 pr_err("REF check failed on sector %lu\n",
3074 (unsigned long)sector);
3080 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3081 unsigned int sectors, bool read)
3085 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3086 scp->device->hostdata, true);
3087 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3088 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3089 struct sg_mapping_iter miter;
3091 /* Bytes of protection data to copy into sgl */
3092 resid = sectors * sizeof(*dif_storep);
3094 sg_miter_start(&miter, scsi_prot_sglist(scp),
3095 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3096 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3098 while (sg_miter_next(&miter) && resid > 0) {
3099 size_t len = min_t(size_t, miter.length, resid);
3100 void *start = dif_store(sip, sector);
3103 if (dif_store_end < start + len)
3104 rest = start + len - dif_store_end;
3109 memcpy(paddr, start, len - rest);
3111 memcpy(start, paddr, len - rest);
3115 memcpy(paddr + len - rest, dif_storep, rest);
3117 memcpy(dif_storep, paddr + len - rest, rest);
3120 sector += len / sizeof(*dif_storep);
3123 sg_miter_stop(&miter);
3126 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3127 unsigned int sectors, u32 ei_lba)
3132 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3133 scp->device->hostdata, true);
3134 struct t10_pi_tuple *sdt;
3136 for (i = 0; i < sectors; i++, ei_lba++) {
3137 sector = start_sec + i;
3138 sdt = dif_store(sip, sector);
3140 if (sdt->app_tag == cpu_to_be16(0xffff))
3144 * Because scsi_debug acts as both initiator and
3145 * target we proceed to verify the PI even if
3146 * RDPROTECT=3. This is done so the "initiator" knows
3147 * which type of error to return. Otherwise we would
3148 * have to iterate over the PI twice.
3150 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3151 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3160 dif_copy_prot(scp, start_sec, sectors, true);
3167 sdeb_read_lock(struct sdeb_store_info *sip)
3169 if (sdebug_no_rwlock) {
3171 __acquire(&sip->macc_lck);
3173 __acquire(&sdeb_fake_rw_lck);
3176 read_lock(&sip->macc_lck);
3178 read_lock(&sdeb_fake_rw_lck);
3183 sdeb_read_unlock(struct sdeb_store_info *sip)
3185 if (sdebug_no_rwlock) {
3187 __release(&sip->macc_lck);
3189 __release(&sdeb_fake_rw_lck);
3192 read_unlock(&sip->macc_lck);
3194 read_unlock(&sdeb_fake_rw_lck);
3199 sdeb_write_lock(struct sdeb_store_info *sip)
3201 if (sdebug_no_rwlock) {
3203 __acquire(&sip->macc_lck);
3205 __acquire(&sdeb_fake_rw_lck);
3208 write_lock(&sip->macc_lck);
3210 write_lock(&sdeb_fake_rw_lck);
3215 sdeb_write_unlock(struct sdeb_store_info *sip)
3217 if (sdebug_no_rwlock) {
3219 __release(&sip->macc_lck);
3221 __release(&sdeb_fake_rw_lck);
3224 write_unlock(&sip->macc_lck);
3226 write_unlock(&sdeb_fake_rw_lck);
3230 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3237 struct sdeb_store_info *sip = devip2sip(devip, true);
3238 u8 *cmd = scp->cmnd;
3243 lba = get_unaligned_be64(cmd + 2);
3244 num = get_unaligned_be32(cmd + 10);
3249 lba = get_unaligned_be32(cmd + 2);
3250 num = get_unaligned_be16(cmd + 7);
3255 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3256 (u32)(cmd[1] & 0x1f) << 16;
3257 num = (0 == cmd[4]) ? 256 : cmd[4];
3262 lba = get_unaligned_be32(cmd + 2);
3263 num = get_unaligned_be32(cmd + 6);
3266 case XDWRITEREAD_10:
3268 lba = get_unaligned_be32(cmd + 2);
3269 num = get_unaligned_be16(cmd + 7);
3272 default: /* assume READ(32) */
3273 lba = get_unaligned_be64(cmd + 12);
3274 ei_lba = get_unaligned_be32(cmd + 20);
3275 num = get_unaligned_be32(cmd + 28);
3279 if (unlikely(have_dif_prot && check_prot)) {
3280 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3282 mk_sense_invalid_opcode(scp);
3283 return check_condition_result;
3285 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3286 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3287 (cmd[1] & 0xe0) == 0)
3288 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3291 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3292 atomic_read(&sdeb_inject_pending))) {
3294 atomic_set(&sdeb_inject_pending, 0);
3297 ret = check_device_access_params(scp, lba, num, false);
3300 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3301 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3302 ((lba + num) > sdebug_medium_error_start))) {
3303 /* claim unrecoverable read error */
3304 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3305 /* set info field and valid bit for fixed descriptor */
3306 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3307 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3308 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3309 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3310 put_unaligned_be32(ret, scp->sense_buffer + 3);
3312 scsi_set_resid(scp, scsi_bufflen(scp));
3313 return check_condition_result;
3316 sdeb_read_lock(sip);
3319 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3320 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3321 case 1: /* Guard tag error */
3322 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3323 sdeb_read_unlock(sip);
3324 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3325 return check_condition_result;
3326 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3327 sdeb_read_unlock(sip);
3328 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3329 return illegal_condition_result;
3332 case 3: /* Reference tag error */
3333 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3334 sdeb_read_unlock(sip);
3335 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3336 return check_condition_result;
3337 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3338 sdeb_read_unlock(sip);
3339 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3340 return illegal_condition_result;
3346 ret = do_device_access(sip, scp, 0, lba, num, false);
3347 sdeb_read_unlock(sip);
3348 if (unlikely(ret == -1))
3349 return DID_ERROR << 16;
3351 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3353 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3354 atomic_read(&sdeb_inject_pending))) {
3355 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3356 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3357 atomic_set(&sdeb_inject_pending, 0);
3358 return check_condition_result;
3359 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3360 /* Logical block guard check failed */
3361 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3362 atomic_set(&sdeb_inject_pending, 0);
3363 return illegal_condition_result;
3364 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3365 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3366 atomic_set(&sdeb_inject_pending, 0);
3367 return illegal_condition_result;
3373 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3374 unsigned int sectors, u32 ei_lba)
3377 struct t10_pi_tuple *sdt;
3379 sector_t sector = start_sec;
3382 struct sg_mapping_iter diter;
3383 struct sg_mapping_iter piter;
3385 BUG_ON(scsi_sg_count(SCpnt) == 0);
3386 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3388 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3389 scsi_prot_sg_count(SCpnt),
3390 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3391 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3392 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3394 /* For each protection page */
3395 while (sg_miter_next(&piter)) {
3397 if (WARN_ON(!sg_miter_next(&diter))) {
3402 for (ppage_offset = 0; ppage_offset < piter.length;
3403 ppage_offset += sizeof(struct t10_pi_tuple)) {
3404 /* If we're at the end of the current
3405 * data page advance to the next one
3407 if (dpage_offset >= diter.length) {
3408 if (WARN_ON(!sg_miter_next(&diter))) {
3415 sdt = piter.addr + ppage_offset;
3416 daddr = diter.addr + dpage_offset;
3418 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3419 ret = dif_verify(sdt, daddr, sector, ei_lba);
3426 dpage_offset += sdebug_sector_size;
3428 diter.consumed = dpage_offset;
3429 sg_miter_stop(&diter);
3431 sg_miter_stop(&piter);
3433 dif_copy_prot(SCpnt, start_sec, sectors, false);
3440 sg_miter_stop(&diter);
3441 sg_miter_stop(&piter);
3445 static unsigned long lba_to_map_index(sector_t lba)
3447 if (sdebug_unmap_alignment)
3448 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3449 sector_div(lba, sdebug_unmap_granularity);
3453 static sector_t map_index_to_lba(unsigned long index)
3455 sector_t lba = index * sdebug_unmap_granularity;
3457 if (sdebug_unmap_alignment)
3458 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3462 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3466 unsigned int mapped;
3467 unsigned long index;
3470 index = lba_to_map_index(lba);
3471 mapped = test_bit(index, sip->map_storep);
3474 next = find_next_zero_bit(sip->map_storep, map_size, index);
3476 next = find_next_bit(sip->map_storep, map_size, index);
3478 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3483 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3486 sector_t end = lba + len;
3489 unsigned long index = lba_to_map_index(lba);
3491 if (index < map_size)
3492 set_bit(index, sip->map_storep);
3494 lba = map_index_to_lba(index + 1);
3498 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3501 sector_t end = lba + len;
3502 u8 *fsp = sip->storep;
3505 unsigned long index = lba_to_map_index(lba);
3507 if (lba == map_index_to_lba(index) &&
3508 lba + sdebug_unmap_granularity <= end &&
3510 clear_bit(index, sip->map_storep);
3511 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3512 memset(fsp + lba * sdebug_sector_size,
3513 (sdebug_lbprz & 1) ? 0 : 0xff,
3514 sdebug_sector_size *
3515 sdebug_unmap_granularity);
3517 if (sip->dif_storep) {
3518 memset(sip->dif_storep + lba, 0xff,
3519 sizeof(*sip->dif_storep) *
3520 sdebug_unmap_granularity);
3523 lba = map_index_to_lba(index + 1);
3527 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3534 struct sdeb_store_info *sip = devip2sip(devip, true);
3535 u8 *cmd = scp->cmnd;
3540 lba = get_unaligned_be64(cmd + 2);
3541 num = get_unaligned_be32(cmd + 10);
3546 lba = get_unaligned_be32(cmd + 2);
3547 num = get_unaligned_be16(cmd + 7);
3552 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3553 (u32)(cmd[1] & 0x1f) << 16;
3554 num = (0 == cmd[4]) ? 256 : cmd[4];
3559 lba = get_unaligned_be32(cmd + 2);
3560 num = get_unaligned_be32(cmd + 6);
3563 case 0x53: /* XDWRITEREAD(10) */
3565 lba = get_unaligned_be32(cmd + 2);
3566 num = get_unaligned_be16(cmd + 7);
3569 default: /* assume WRITE(32) */
3570 lba = get_unaligned_be64(cmd + 12);
3571 ei_lba = get_unaligned_be32(cmd + 20);
3572 num = get_unaligned_be32(cmd + 28);
3576 if (unlikely(have_dif_prot && check_prot)) {
3577 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3579 mk_sense_invalid_opcode(scp);
3580 return check_condition_result;
3582 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3583 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3584 (cmd[1] & 0xe0) == 0)
3585 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3589 sdeb_write_lock(sip);
3590 ret = check_device_access_params(scp, lba, num, true);
3592 sdeb_write_unlock(sip);
3597 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3598 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3599 case 1: /* Guard tag error */
3600 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3601 sdeb_write_unlock(sip);
3602 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3603 return illegal_condition_result;
3604 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3605 sdeb_write_unlock(sip);
3606 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3607 return check_condition_result;
3610 case 3: /* Reference tag error */
3611 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3612 sdeb_write_unlock(sip);
3613 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3614 return illegal_condition_result;
3615 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3616 sdeb_write_unlock(sip);
3617 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3618 return check_condition_result;
3624 ret = do_device_access(sip, scp, 0, lba, num, true);
3625 if (unlikely(scsi_debug_lbp()))
3626 map_region(sip, lba, num);
3627 /* If ZBC zone then bump its write pointer */
3628 if (sdebug_dev_is_zoned(devip))
3629 zbc_inc_wp(devip, lba, num);
3630 sdeb_write_unlock(sip);
3631 if (unlikely(-1 == ret))
3632 return DID_ERROR << 16;
3633 else if (unlikely(sdebug_verbose &&
3634 (ret < (num * sdebug_sector_size))))
3635 sdev_printk(KERN_INFO, scp->device,
3636 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3637 my_name, num * sdebug_sector_size, ret);
3639 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3640 atomic_read(&sdeb_inject_pending))) {
3641 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3642 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3643 atomic_set(&sdeb_inject_pending, 0);
3644 return check_condition_result;
3645 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3646 /* Logical block guard check failed */
3647 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3648 atomic_set(&sdeb_inject_pending, 0);
3649 return illegal_condition_result;
3650 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3651 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3652 atomic_set(&sdeb_inject_pending, 0);
3653 return illegal_condition_result;
3660 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3661 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3663 static int resp_write_scat(struct scsi_cmnd *scp,
3664 struct sdebug_dev_info *devip)
3666 u8 *cmd = scp->cmnd;
3669 struct sdeb_store_info *sip = devip2sip(devip, true);
3671 u16 lbdof, num_lrd, k;
3672 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3673 u32 lb_size = sdebug_sector_size;
3678 static const u32 lrd_size = 32; /* + parameter list header size */
3680 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3682 wrprotect = (cmd[10] >> 5) & 0x7;
3683 lbdof = get_unaligned_be16(cmd + 12);
3684 num_lrd = get_unaligned_be16(cmd + 16);
3685 bt_len = get_unaligned_be32(cmd + 28);
3686 } else { /* that leaves WRITE SCATTERED(16) */
3688 wrprotect = (cmd[2] >> 5) & 0x7;
3689 lbdof = get_unaligned_be16(cmd + 4);
3690 num_lrd = get_unaligned_be16(cmd + 8);
3691 bt_len = get_unaligned_be32(cmd + 10);
3692 if (unlikely(have_dif_prot)) {
3693 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3695 mk_sense_invalid_opcode(scp);
3696 return illegal_condition_result;
3698 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3699 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3701 sdev_printk(KERN_ERR, scp->device,
3702 "Unprotected WR to DIF device\n");
3705 if ((num_lrd == 0) || (bt_len == 0))
3706 return 0; /* T10 says these do-nothings are not errors */
3709 sdev_printk(KERN_INFO, scp->device,
3710 "%s: %s: LB Data Offset field bad\n",
3712 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3713 return illegal_condition_result;
3715 lbdof_blen = lbdof * lb_size;
3716 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3718 sdev_printk(KERN_INFO, scp->device,
3719 "%s: %s: LBA range descriptors don't fit\n",
3721 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3722 return illegal_condition_result;
3724 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3726 return SCSI_MLQUEUE_HOST_BUSY;
3728 sdev_printk(KERN_INFO, scp->device,
3729 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3730 my_name, __func__, lbdof_blen);
3731 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3733 ret = DID_ERROR << 16;
3737 sdeb_write_lock(sip);
3738 sg_off = lbdof_blen;
3739 /* Spec says Buffer xfer Length field in number of LBs in dout */
3741 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3742 lba = get_unaligned_be64(up + 0);
3743 num = get_unaligned_be32(up + 8);
3745 sdev_printk(KERN_INFO, scp->device,
3746 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3747 my_name, __func__, k, lba, num, sg_off);
3750 ret = check_device_access_params(scp, lba, num, true);
3752 goto err_out_unlock;
3753 num_by = num * lb_size;
3754 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3756 if ((cum_lb + num) > bt_len) {
3758 sdev_printk(KERN_INFO, scp->device,
3759 "%s: %s: sum of blocks > data provided\n",
3761 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3763 ret = illegal_condition_result;
3764 goto err_out_unlock;
3768 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3769 int prot_ret = prot_verify_write(scp, lba, num,
3773 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3775 ret = illegal_condition_result;
3776 goto err_out_unlock;
3780 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3781 /* If ZBC zone then bump its write pointer */
3782 if (sdebug_dev_is_zoned(devip))
3783 zbc_inc_wp(devip, lba, num);
3784 if (unlikely(scsi_debug_lbp()))
3785 map_region(sip, lba, num);
3786 if (unlikely(-1 == ret)) {
3787 ret = DID_ERROR << 16;
3788 goto err_out_unlock;
3789 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3790 sdev_printk(KERN_INFO, scp->device,
3791 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3792 my_name, num_by, ret);
3794 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3795 atomic_read(&sdeb_inject_pending))) {
3796 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3797 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3798 atomic_set(&sdeb_inject_pending, 0);
3799 ret = check_condition_result;
3800 goto err_out_unlock;
3801 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3802 /* Logical block guard check failed */
3803 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3804 atomic_set(&sdeb_inject_pending, 0);
3805 ret = illegal_condition_result;
3806 goto err_out_unlock;
3807 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3808 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3809 atomic_set(&sdeb_inject_pending, 0);
3810 ret = illegal_condition_result;
3811 goto err_out_unlock;
3819 sdeb_write_unlock(sip);
3825 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3826 u32 ei_lba, bool unmap, bool ndob)
3828 struct scsi_device *sdp = scp->device;
3829 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3830 unsigned long long i;
3832 u32 lb_size = sdebug_sector_size;
3834 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3835 scp->device->hostdata, true);
3839 sdeb_write_lock(sip);
3841 ret = check_device_access_params(scp, lba, num, true);
3843 sdeb_write_unlock(sip);
3847 if (unmap && scsi_debug_lbp()) {
3848 unmap_region(sip, lba, num);
3852 block = do_div(lbaa, sdebug_store_sectors);
3853 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3855 fs1p = fsp + (block * lb_size);
3857 memset(fs1p, 0, lb_size);
3860 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3863 sdeb_write_unlock(sip);
3864 return DID_ERROR << 16;
3865 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3866 sdev_printk(KERN_INFO, scp->device,
3867 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3868 my_name, "write same", lb_size, ret);
3870 /* Copy first sector to remaining blocks */
3871 for (i = 1 ; i < num ; i++) {
3873 block = do_div(lbaa, sdebug_store_sectors);
3874 memmove(fsp + (block * lb_size), fs1p, lb_size);
3876 if (scsi_debug_lbp())
3877 map_region(sip, lba, num);
3878 /* If ZBC zone then bump its write pointer */
3879 if (sdebug_dev_is_zoned(devip))
3880 zbc_inc_wp(devip, lba, num);
3882 sdeb_write_unlock(sip);
3887 static int resp_write_same_10(struct scsi_cmnd *scp,
3888 struct sdebug_dev_info *devip)
3890 u8 *cmd = scp->cmnd;
3897 if (sdebug_lbpws10 == 0) {
3898 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3899 return check_condition_result;
3903 lba = get_unaligned_be32(cmd + 2);
3904 num = get_unaligned_be16(cmd + 7);
3905 if (num > sdebug_write_same_length) {
3906 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3907 return check_condition_result;
3909 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3912 static int resp_write_same_16(struct scsi_cmnd *scp,
3913 struct sdebug_dev_info *devip)
3915 u8 *cmd = scp->cmnd;
3922 if (cmd[1] & 0x8) { /* UNMAP */
3923 if (sdebug_lbpws == 0) {
3924 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3925 return check_condition_result;
3929 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3931 lba = get_unaligned_be64(cmd + 2);
3932 num = get_unaligned_be32(cmd + 10);
3933 if (num > sdebug_write_same_length) {
3934 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3935 return check_condition_result;
3937 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3940 /* Note the mode field is in the same position as the (lower) service action
3941 * field. For the Report supported operation codes command, SPC-4 suggests
3942 * each mode of this command should be reported separately; for future. */
3943 static int resp_write_buffer(struct scsi_cmnd *scp,
3944 struct sdebug_dev_info *devip)
3946 u8 *cmd = scp->cmnd;
3947 struct scsi_device *sdp = scp->device;
3948 struct sdebug_dev_info *dp;
3951 mode = cmd[1] & 0x1f;
3953 case 0x4: /* download microcode (MC) and activate (ACT) */
3954 /* set UAs on this device only */
3955 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3956 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3958 case 0x5: /* download MC, save and ACT */
3959 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3961 case 0x6: /* download MC with offsets and ACT */
3962 /* set UAs on most devices (LUs) in this target */
3963 list_for_each_entry(dp,
3964 &devip->sdbg_host->dev_info_list,
3966 if (dp->target == sdp->id) {
3967 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3969 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3973 case 0x7: /* download MC with offsets, save, and ACT */
3974 /* set UA on all devices (LUs) in this target */
3975 list_for_each_entry(dp,
3976 &devip->sdbg_host->dev_info_list,
3978 if (dp->target == sdp->id)
3979 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3983 /* do nothing for this command for other mode values */
3989 static int resp_comp_write(struct scsi_cmnd *scp,
3990 struct sdebug_dev_info *devip)
3992 u8 *cmd = scp->cmnd;
3994 struct sdeb_store_info *sip = devip2sip(devip, true);
3997 u32 lb_size = sdebug_sector_size;
4002 lba = get_unaligned_be64(cmd + 2);
4003 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4005 return 0; /* degenerate case, not an error */
4006 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4008 mk_sense_invalid_opcode(scp);
4009 return check_condition_result;
4011 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4012 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4013 (cmd[1] & 0xe0) == 0)
4014 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4016 ret = check_device_access_params(scp, lba, num, false);
4020 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4022 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4024 return check_condition_result;
4027 sdeb_write_lock(sip);
4029 ret = do_dout_fetch(scp, dnum, arr);
4031 retval = DID_ERROR << 16;
4033 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4034 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4035 "indicated=%u, IO sent=%d bytes\n", my_name,
4036 dnum * lb_size, ret);
4037 if (!comp_write_worker(sip, lba, num, arr, false)) {
4038 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4039 retval = check_condition_result;
4042 if (scsi_debug_lbp())
4043 map_region(sip, lba, num);
4045 sdeb_write_unlock(sip);
4050 struct unmap_block_desc {
4056 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4059 struct unmap_block_desc *desc;
4060 struct sdeb_store_info *sip = devip2sip(devip, true);
4061 unsigned int i, payload_len, descriptors;
4064 if (!scsi_debug_lbp())
4065 return 0; /* fib and say its done */
4066 payload_len = get_unaligned_be16(scp->cmnd + 7);
4067 BUG_ON(scsi_bufflen(scp) != payload_len);
4069 descriptors = (payload_len - 8) / 16;
4070 if (descriptors > sdebug_unmap_max_desc) {
4071 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4072 return check_condition_result;
4075 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4077 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4079 return check_condition_result;
4082 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4084 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4085 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4087 desc = (void *)&buf[8];
4089 sdeb_write_lock(sip);
4091 for (i = 0 ; i < descriptors ; i++) {
4092 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4093 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4095 ret = check_device_access_params(scp, lba, num, true);
4099 unmap_region(sip, lba, num);
4105 sdeb_write_unlock(sip);
4111 #define SDEBUG_GET_LBA_STATUS_LEN 32
4113 static int resp_get_lba_status(struct scsi_cmnd *scp,
4114 struct sdebug_dev_info *devip)
4116 u8 *cmd = scp->cmnd;
4118 u32 alloc_len, mapped, num;
4120 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4122 lba = get_unaligned_be64(cmd + 2);
4123 alloc_len = get_unaligned_be32(cmd + 10);
4128 ret = check_device_access_params(scp, lba, 1, false);
4132 if (scsi_debug_lbp()) {
4133 struct sdeb_store_info *sip = devip2sip(devip, true);
4135 mapped = map_state(sip, lba, &num);
4138 /* following just in case virtual_gb changed */
4139 sdebug_capacity = get_sdebug_capacity();
4140 if (sdebug_capacity - lba <= 0xffffffff)
4141 num = sdebug_capacity - lba;
4146 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4147 put_unaligned_be32(20, arr); /* Parameter Data Length */
4148 put_unaligned_be64(lba, arr + 8); /* LBA */
4149 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4150 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4152 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4155 static int resp_sync_cache(struct scsi_cmnd *scp,
4156 struct sdebug_dev_info *devip)
4161 u8 *cmd = scp->cmnd;
4163 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4164 lba = get_unaligned_be32(cmd + 2);
4165 num_blocks = get_unaligned_be16(cmd + 7);
4166 } else { /* SYNCHRONIZE_CACHE(16) */
4167 lba = get_unaligned_be64(cmd + 2);
4168 num_blocks = get_unaligned_be32(cmd + 10);
4170 if (lba + num_blocks > sdebug_capacity) {
4171 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4172 return check_condition_result;
4174 if (!write_since_sync || (cmd[1] & 0x2))
4175 res = SDEG_RES_IMMED_MASK;
4176 else /* delay if write_since_sync and IMMED clear */
4177 write_since_sync = false;
4182 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4183 * CONDITION MET if the specified blocks will/have fitted in the cache, and
4184 * a GOOD status otherwise. Model a disk with a big cache and yield
4185 * CONDITION MET. Actually tries to bring range in main memory into the
4186 * cache associated with the CPU(s).
4188 static int resp_pre_fetch(struct scsi_cmnd *scp,
4189 struct sdebug_dev_info *devip)
4193 u64 block, rest = 0;
4195 u8 *cmd = scp->cmnd;
4196 struct sdeb_store_info *sip = devip2sip(devip, true);
4197 u8 *fsp = sip->storep;
4199 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4200 lba = get_unaligned_be32(cmd + 2);
4201 nblks = get_unaligned_be16(cmd + 7);
4202 } else { /* PRE-FETCH(16) */
4203 lba = get_unaligned_be64(cmd + 2);
4204 nblks = get_unaligned_be32(cmd + 10);
4206 if (lba + nblks > sdebug_capacity) {
4207 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4208 return check_condition_result;
4212 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4213 block = do_div(lba, sdebug_store_sectors);
4214 if (block + nblks > sdebug_store_sectors)
4215 rest = block + nblks - sdebug_store_sectors;
4217 /* Try to bring the PRE-FETCH range into CPU's cache */
4218 sdeb_read_lock(sip);
4219 prefetch_range(fsp + (sdebug_sector_size * block),
4220 (nblks - rest) * sdebug_sector_size);
4222 prefetch_range(fsp, rest * sdebug_sector_size);
4223 sdeb_read_unlock(sip);
4226 res = SDEG_RES_IMMED_MASK;
4227 return res | condition_met_result;
4230 #define RL_BUCKET_ELEMS 8
4232 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4233 * (W-LUN), the normal Linux scanning logic does not associate it with a
4234 * device (e.g. /dev/sg7). The following magic will make that association:
4235 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4236 * where <n> is a host number. If there are multiple targets in a host then
4237 * the above will associate a W-LUN to each target. To only get a W-LUN
4238 * for target 2, then use "echo '- 2 49409' > scan" .
4240 static int resp_report_luns(struct scsi_cmnd *scp,
4241 struct sdebug_dev_info *devip)
4243 unsigned char *cmd = scp->cmnd;
4244 unsigned int alloc_len;
4245 unsigned char select_report;
4247 struct scsi_lun *lun_p;
4248 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4249 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4250 unsigned int wlun_cnt; /* report luns W-LUN count */
4251 unsigned int tlun_cnt; /* total LUN count */
4252 unsigned int rlen; /* response length (in bytes) */
4254 unsigned int off_rsp = 0;
4255 const int sz_lun = sizeof(struct scsi_lun);
4257 clear_luns_changed_on_target(devip);
4259 select_report = cmd[2];
4260 alloc_len = get_unaligned_be32(cmd + 6);
4262 if (alloc_len < 4) {
4263 pr_err("alloc len too small %d\n", alloc_len);
4264 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4265 return check_condition_result;
4268 switch (select_report) {
4269 case 0: /* all LUNs apart from W-LUNs */
4270 lun_cnt = sdebug_max_luns;
4273 case 1: /* only W-LUNs */
4277 case 2: /* all LUNs */
4278 lun_cnt = sdebug_max_luns;
4281 case 0x10: /* only administrative LUs */
4282 case 0x11: /* see SPC-5 */
4283 case 0x12: /* only subsiduary LUs owned by referenced LU */
4285 pr_debug("select report invalid %d\n", select_report);
4286 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4287 return check_condition_result;
4290 if (sdebug_no_lun_0 && (lun_cnt > 0))
4293 tlun_cnt = lun_cnt + wlun_cnt;
4294 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4295 scsi_set_resid(scp, scsi_bufflen(scp));
4296 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4297 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4299 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4300 lun = sdebug_no_lun_0 ? 1 : 0;
4301 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4302 memset(arr, 0, sizeof(arr));
4303 lun_p = (struct scsi_lun *)&arr[0];
4305 put_unaligned_be32(rlen, &arr[0]);
4309 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4310 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4312 int_to_scsilun(lun++, lun_p);
4313 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4314 lun_p->scsi_lun[0] |= 0x40;
4316 if (j < RL_BUCKET_ELEMS)
4319 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4325 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4329 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4333 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4335 bool is_bytchk3 = false;
4338 u32 vnum, a_num, off;
4339 const u32 lb_size = sdebug_sector_size;
4342 u8 *cmd = scp->cmnd;
4343 struct sdeb_store_info *sip = devip2sip(devip, true);
4345 bytchk = (cmd[1] >> 1) & 0x3;
4347 return 0; /* always claim internal verify okay */
4348 } else if (bytchk == 2) {
4349 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4350 return check_condition_result;
4351 } else if (bytchk == 3) {
4352 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4356 lba = get_unaligned_be64(cmd + 2);
4357 vnum = get_unaligned_be32(cmd + 10);
4359 case VERIFY: /* is VERIFY(10) */
4360 lba = get_unaligned_be32(cmd + 2);
4361 vnum = get_unaligned_be16(cmd + 7);
4364 mk_sense_invalid_opcode(scp);
4365 return check_condition_result;
4368 return 0; /* not an error */
4369 a_num = is_bytchk3 ? 1 : vnum;
4370 /* Treat following check like one for read (i.e. no write) access */
4371 ret = check_device_access_params(scp, lba, a_num, false);
4375 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4377 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4379 return check_condition_result;
4381 /* Not changing store, so only need read access */
4382 sdeb_read_lock(sip);
4384 ret = do_dout_fetch(scp, a_num, arr);
4386 ret = DID_ERROR << 16;
4388 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4389 sdev_printk(KERN_INFO, scp->device,
4390 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4391 my_name, __func__, a_num * lb_size, ret);
4394 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4395 memcpy(arr + off, arr, lb_size);
4398 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4399 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4400 ret = check_condition_result;
4404 sdeb_read_unlock(sip);
4409 #define RZONES_DESC_HD 64
4411 /* Report zones depending on start LBA nad reporting options */
4412 static int resp_report_zones(struct scsi_cmnd *scp,
4413 struct sdebug_dev_info *devip)
4415 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4417 u32 alloc_len, rep_opts, rep_len;
4420 u8 *arr = NULL, *desc;
4421 u8 *cmd = scp->cmnd;
4422 struct sdeb_zone_state *zsp;
4423 struct sdeb_store_info *sip = devip2sip(devip, false);
4425 if (!sdebug_dev_is_zoned(devip)) {
4426 mk_sense_invalid_opcode(scp);
4427 return check_condition_result;
4429 zs_lba = get_unaligned_be64(cmd + 2);
4430 alloc_len = get_unaligned_be32(cmd + 10);
4432 return 0; /* not an error */
4433 rep_opts = cmd[14] & 0x3f;
4434 partial = cmd[14] & 0x80;
4436 if (zs_lba >= sdebug_capacity) {
4437 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4438 return check_condition_result;
4441 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4442 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4445 arr = kzalloc(alloc_len, GFP_ATOMIC);
4447 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4449 return check_condition_result;
4452 sdeb_read_lock(sip);
4455 for (i = 0; i < max_zones; i++) {
4456 lba = zs_lba + devip->zsize * i;
4457 if (lba > sdebug_capacity)
4459 zsp = zbc_zone(devip, lba);
4466 if (zsp->z_cond != ZC1_EMPTY)
4470 /* Implicit open zones */
4471 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4475 /* Explicit open zones */
4476 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4481 if (zsp->z_cond != ZC4_CLOSED)
4486 if (zsp->z_cond != ZC5_FULL)
4493 * Read-only, offline, reset WP recommended are
4494 * not emulated: no zones to report;
4498 /* non-seq-resource set */
4499 if (!zsp->z_non_seq_resource)
4503 /* Not write pointer (conventional) zones */
4504 if (!zbc_zone_is_conv(zsp))
4508 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4509 INVALID_FIELD_IN_CDB, 0);
4510 ret = check_condition_result;
4514 if (nrz < rep_max_zones) {
4515 /* Fill zone descriptor */
4516 desc[0] = zsp->z_type;
4517 desc[1] = zsp->z_cond << 4;
4518 if (zsp->z_non_seq_resource)
4520 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4521 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4522 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4526 if (partial && nrz >= rep_max_zones)
4533 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4534 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4536 rep_len = (unsigned long)desc - (unsigned long)arr;
4537 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4540 sdeb_read_unlock(sip);
4545 /* Logic transplanted from tcmu-runner, file_zbc.c */
4546 static void zbc_open_all(struct sdebug_dev_info *devip)
4548 struct sdeb_zone_state *zsp = &devip->zstate[0];
4551 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4552 if (zsp->z_cond == ZC4_CLOSED)
4553 zbc_open_zone(devip, &devip->zstate[i], true);
4557 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4561 enum sdebug_z_cond zc;
4562 u8 *cmd = scp->cmnd;
4563 struct sdeb_zone_state *zsp;
4564 bool all = cmd[14] & 0x01;
4565 struct sdeb_store_info *sip = devip2sip(devip, false);
4567 if (!sdebug_dev_is_zoned(devip)) {
4568 mk_sense_invalid_opcode(scp);
4569 return check_condition_result;
4572 sdeb_write_lock(sip);
4575 /* Check if all closed zones can be open */
4576 if (devip->max_open &&
4577 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4578 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4580 res = check_condition_result;
4583 /* Open all closed zones */
4584 zbc_open_all(devip);
4588 /* Open the specified zone */
4589 z_id = get_unaligned_be64(cmd + 2);
4590 if (z_id >= sdebug_capacity) {
4591 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4592 res = check_condition_result;
4596 zsp = zbc_zone(devip, z_id);
4597 if (z_id != zsp->z_start) {
4598 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4599 res = check_condition_result;
4602 if (zbc_zone_is_conv(zsp)) {
4603 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4604 res = check_condition_result;
4609 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4612 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4613 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4615 res = check_condition_result;
4619 zbc_open_zone(devip, zsp, true);
4621 sdeb_write_unlock(sip);
4625 static void zbc_close_all(struct sdebug_dev_info *devip)
4629 for (i = 0; i < devip->nr_zones; i++)
4630 zbc_close_zone(devip, &devip->zstate[i]);
4633 static int resp_close_zone(struct scsi_cmnd *scp,
4634 struct sdebug_dev_info *devip)
4638 u8 *cmd = scp->cmnd;
4639 struct sdeb_zone_state *zsp;
4640 bool all = cmd[14] & 0x01;
4641 struct sdeb_store_info *sip = devip2sip(devip, false);
4643 if (!sdebug_dev_is_zoned(devip)) {
4644 mk_sense_invalid_opcode(scp);
4645 return check_condition_result;
4648 sdeb_write_lock(sip);
4651 zbc_close_all(devip);
4655 /* Close specified zone */
4656 z_id = get_unaligned_be64(cmd + 2);
4657 if (z_id >= sdebug_capacity) {
4658 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4659 res = check_condition_result;
4663 zsp = zbc_zone(devip, z_id);
4664 if (z_id != zsp->z_start) {
4665 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4666 res = check_condition_result;
4669 if (zbc_zone_is_conv(zsp)) {
4670 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4671 res = check_condition_result;
4675 zbc_close_zone(devip, zsp);
4677 sdeb_write_unlock(sip);
4681 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4682 struct sdeb_zone_state *zsp, bool empty)
4684 enum sdebug_z_cond zc = zsp->z_cond;
4686 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4687 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4688 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4689 zbc_close_zone(devip, zsp);
4690 if (zsp->z_cond == ZC4_CLOSED)
4692 zsp->z_wp = zsp->z_start + zsp->z_size;
4693 zsp->z_cond = ZC5_FULL;
4697 static void zbc_finish_all(struct sdebug_dev_info *devip)
4701 for (i = 0; i < devip->nr_zones; i++)
4702 zbc_finish_zone(devip, &devip->zstate[i], false);
4705 static int resp_finish_zone(struct scsi_cmnd *scp,
4706 struct sdebug_dev_info *devip)
4708 struct sdeb_zone_state *zsp;
4711 u8 *cmd = scp->cmnd;
4712 bool all = cmd[14] & 0x01;
4713 struct sdeb_store_info *sip = devip2sip(devip, false);
4715 if (!sdebug_dev_is_zoned(devip)) {
4716 mk_sense_invalid_opcode(scp);
4717 return check_condition_result;
4720 sdeb_write_lock(sip);
4723 zbc_finish_all(devip);
4727 /* Finish the specified zone */
4728 z_id = get_unaligned_be64(cmd + 2);
4729 if (z_id >= sdebug_capacity) {
4730 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4731 res = check_condition_result;
4735 zsp = zbc_zone(devip, z_id);
4736 if (z_id != zsp->z_start) {
4737 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4738 res = check_condition_result;
4741 if (zbc_zone_is_conv(zsp)) {
4742 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4743 res = check_condition_result;
4747 zbc_finish_zone(devip, zsp, true);
4749 sdeb_write_unlock(sip);
4753 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4754 struct sdeb_zone_state *zsp)
4756 enum sdebug_z_cond zc;
4757 struct sdeb_store_info *sip = devip2sip(devip, false);
4759 if (zbc_zone_is_conv(zsp))
4763 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4764 zbc_close_zone(devip, zsp);
4766 if (zsp->z_cond == ZC4_CLOSED)
4769 if (zsp->z_wp > zsp->z_start)
4770 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4771 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4773 zsp->z_non_seq_resource = false;
4774 zsp->z_wp = zsp->z_start;
4775 zsp->z_cond = ZC1_EMPTY;
4778 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4782 for (i = 0; i < devip->nr_zones; i++)
4783 zbc_rwp_zone(devip, &devip->zstate[i]);
4786 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4788 struct sdeb_zone_state *zsp;
4791 u8 *cmd = scp->cmnd;
4792 bool all = cmd[14] & 0x01;
4793 struct sdeb_store_info *sip = devip2sip(devip, false);
4795 if (!sdebug_dev_is_zoned(devip)) {
4796 mk_sense_invalid_opcode(scp);
4797 return check_condition_result;
4800 sdeb_write_lock(sip);
4807 z_id = get_unaligned_be64(cmd + 2);
4808 if (z_id >= sdebug_capacity) {
4809 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4810 res = check_condition_result;
4814 zsp = zbc_zone(devip, z_id);
4815 if (z_id != zsp->z_start) {
4816 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4817 res = check_condition_result;
4820 if (zbc_zone_is_conv(zsp)) {
4821 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4822 res = check_condition_result;
4826 zbc_rwp_zone(devip, zsp);
4828 sdeb_write_unlock(sip);
4832 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4835 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4837 hwq = blk_mq_unique_tag_to_hwq(tag);
4839 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4840 if (WARN_ON_ONCE(hwq >= submit_queues))
4843 return sdebug_q_arr + hwq;
4846 static u32 get_tag(struct scsi_cmnd *cmnd)
4848 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4851 /* Queued (deferred) command completions converge here. */
4852 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4854 bool aborted = sd_dp->aborted;
4857 unsigned long iflags;
4858 struct sdebug_queue *sqp;
4859 struct sdebug_queued_cmd *sqcp;
4860 struct scsi_cmnd *scp;
4861 struct sdebug_dev_info *devip;
4863 if (unlikely(aborted))
4864 sd_dp->aborted = false;
4865 qc_idx = sd_dp->qc_idx;
4866 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4867 if (sdebug_statistics) {
4868 atomic_inc(&sdebug_completions);
4869 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4870 atomic_inc(&sdebug_miss_cpus);
4872 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4873 pr_err("wild qc_idx=%d\n", qc_idx);
4876 spin_lock_irqsave(&sqp->qc_lock, iflags);
4877 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4878 sqcp = &sqp->qc_arr[qc_idx];
4880 if (unlikely(scp == NULL)) {
4881 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4882 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4883 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4886 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4888 atomic_dec(&devip->num_in_q);
4890 pr_err("devip=NULL\n");
4891 if (unlikely(atomic_read(&retired_max_queue) > 0))
4894 sqcp->a_cmnd = NULL;
4895 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4896 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4897 pr_err("Unexpected completion\n");
4901 if (unlikely(retiring)) { /* user has reduced max_queue */
4904 retval = atomic_read(&retired_max_queue);
4905 if (qc_idx >= retval) {
4906 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4907 pr_err("index %d too large\n", retval);
4910 k = find_last_bit(sqp->in_use_bm, retval);
4911 if ((k < sdebug_max_queue) || (k == retval))
4912 atomic_set(&retired_max_queue, 0);
4914 atomic_set(&retired_max_queue, k + 1);
4916 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4917 if (unlikely(aborted)) {
4919 pr_info("bypassing scsi_done() due to aborted cmd\n");
4922 scsi_done(scp); /* callback to mid level */
4925 /* When high resolution timer goes off this function is called. */
4926 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4928 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4930 sdebug_q_cmd_complete(sd_dp);
4931 return HRTIMER_NORESTART;
4934 /* When work queue schedules work, it calls this function. */
4935 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4937 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4939 sdebug_q_cmd_complete(sd_dp);
4942 static bool got_shared_uuid;
4943 static uuid_t shared_uuid;
4945 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4947 struct sdeb_zone_state *zsp;
4948 sector_t capacity = get_sdebug_capacity();
4949 sector_t zstart = 0;
4953 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4954 * a zone size allowing for at least 4 zones on the device. Otherwise,
4955 * use the specified zone size checking that at least 2 zones can be
4956 * created for the device.
4958 if (!sdeb_zbc_zone_size_mb) {
4959 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4960 >> ilog2(sdebug_sector_size);
4961 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4963 if (devip->zsize < 2) {
4964 pr_err("Device capacity too small\n");
4968 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4969 pr_err("Zone size is not a power of 2\n");
4972 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4973 >> ilog2(sdebug_sector_size);
4974 if (devip->zsize >= capacity) {
4975 pr_err("Zone size too large for device capacity\n");
4980 devip->zsize_shift = ilog2(devip->zsize);
4981 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4983 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4984 pr_err("Number of conventional zones too large\n");
4987 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4989 if (devip->zmodel == BLK_ZONED_HM) {
4990 /* zbc_max_open_zones can be 0, meaning "not reported" */
4991 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4992 devip->max_open = (devip->nr_zones - 1) / 2;
4994 devip->max_open = sdeb_zbc_max_open;
4997 devip->zstate = kcalloc(devip->nr_zones,
4998 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5002 for (i = 0; i < devip->nr_zones; i++) {
5003 zsp = &devip->zstate[i];
5005 zsp->z_start = zstart;
5007 if (i < devip->nr_conv_zones) {
5008 zsp->z_type = ZBC_ZONE_TYPE_CNV;
5009 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5010 zsp->z_wp = (sector_t)-1;
5012 if (devip->zmodel == BLK_ZONED_HM)
5013 zsp->z_type = ZBC_ZONE_TYPE_SWR;
5015 zsp->z_type = ZBC_ZONE_TYPE_SWP;
5016 zsp->z_cond = ZC1_EMPTY;
5017 zsp->z_wp = zsp->z_start;
5020 if (zsp->z_start + devip->zsize < capacity)
5021 zsp->z_size = devip->zsize;
5023 zsp->z_size = capacity - zsp->z_start;
5025 zstart += zsp->z_size;
5031 static struct sdebug_dev_info *sdebug_device_create(
5032 struct sdebug_host_info *sdbg_host, gfp_t flags)
5034 struct sdebug_dev_info *devip;
5036 devip = kzalloc(sizeof(*devip), flags);
5038 if (sdebug_uuid_ctl == 1)
5039 uuid_gen(&devip->lu_name);
5040 else if (sdebug_uuid_ctl == 2) {
5041 if (got_shared_uuid)
5042 devip->lu_name = shared_uuid;
5044 uuid_gen(&shared_uuid);
5045 got_shared_uuid = true;
5046 devip->lu_name = shared_uuid;
5049 devip->sdbg_host = sdbg_host;
5050 if (sdeb_zbc_in_use) {
5051 devip->zmodel = sdeb_zbc_model;
5052 if (sdebug_device_create_zones(devip)) {
5057 devip->zmodel = BLK_ZONED_NONE;
5059 devip->sdbg_host = sdbg_host;
5060 devip->create_ts = ktime_get_boottime();
5061 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5062 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5067 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5069 struct sdebug_host_info *sdbg_host;
5070 struct sdebug_dev_info *open_devip = NULL;
5071 struct sdebug_dev_info *devip;
5073 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5075 pr_err("Host info NULL\n");
5079 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5080 if ((devip->used) && (devip->channel == sdev->channel) &&
5081 (devip->target == sdev->id) &&
5082 (devip->lun == sdev->lun))
5085 if ((!devip->used) && (!open_devip))
5089 if (!open_devip) { /* try and make a new one */
5090 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5092 pr_err("out of memory at line %d\n", __LINE__);
5097 open_devip->channel = sdev->channel;
5098 open_devip->target = sdev->id;
5099 open_devip->lun = sdev->lun;
5100 open_devip->sdbg_host = sdbg_host;
5101 atomic_set(&open_devip->num_in_q, 0);
5102 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5103 open_devip->used = true;
5107 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5110 pr_info("slave_alloc <%u %u %u %llu>\n",
5111 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5115 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5117 struct sdebug_dev_info *devip =
5118 (struct sdebug_dev_info *)sdp->hostdata;
5121 pr_info("slave_configure <%u %u %u %llu>\n",
5122 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5123 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5124 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5125 if (smp_load_acquire(&sdebug_deflect_incoming)) {
5126 pr_info("Exit early due to deflect_incoming\n");
5129 if (devip == NULL) {
5130 devip = find_build_dev_info(sdp);
5132 return 1; /* no resources, will be marked offline */
5134 sdp->hostdata = devip;
5136 sdp->no_uld_attach = 1;
5137 config_cdb_len(sdp);
5141 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5143 struct sdebug_dev_info *devip =
5144 (struct sdebug_dev_info *)sdp->hostdata;
5147 pr_info("slave_destroy <%u %u %u %llu>\n",
5148 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5150 /* make this slot available for re-use */
5151 devip->used = false;
5152 sdp->hostdata = NULL;
5156 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5157 enum sdeb_defer_type defer_t)
5161 if (defer_t == SDEB_DEFER_HRT)
5162 hrtimer_cancel(&sd_dp->hrt);
5163 else if (defer_t == SDEB_DEFER_WQ)
5164 cancel_work_sync(&sd_dp->ew.work);
5167 /* If @cmnd found deletes its timer or work queue and returns true; else
5169 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5171 unsigned long iflags;
5172 int j, k, qmax, r_qmax;
5173 enum sdeb_defer_type l_defer_t;
5174 struct sdebug_queue *sqp;
5175 struct sdebug_queued_cmd *sqcp;
5176 struct sdebug_dev_info *devip;
5177 struct sdebug_defer *sd_dp;
5179 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5180 spin_lock_irqsave(&sqp->qc_lock, iflags);
5181 qmax = sdebug_max_queue;
5182 r_qmax = atomic_read(&retired_max_queue);
5185 for (k = 0; k < qmax; ++k) {
5186 if (test_bit(k, sqp->in_use_bm)) {
5187 sqcp = &sqp->qc_arr[k];
5188 if (cmnd != sqcp->a_cmnd)
5191 devip = (struct sdebug_dev_info *)
5192 cmnd->device->hostdata;
5194 atomic_dec(&devip->num_in_q);
5195 sqcp->a_cmnd = NULL;
5196 sd_dp = sqcp->sd_dp;
5198 l_defer_t = READ_ONCE(sd_dp->defer_t);
5199 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5201 l_defer_t = SDEB_DEFER_NONE;
5202 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5203 stop_qc_helper(sd_dp, l_defer_t);
5204 clear_bit(k, sqp->in_use_bm);
5208 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5213 /* Deletes (stops) timers or work queues of all queued commands */
5214 static void stop_all_queued(bool done_with_no_conn)
5216 unsigned long iflags;
5218 enum sdeb_defer_type l_defer_t;
5219 struct sdebug_queue *sqp;
5220 struct sdebug_queued_cmd *sqcp;
5221 struct sdebug_dev_info *devip;
5222 struct sdebug_defer *sd_dp;
5223 struct scsi_cmnd *scp;
5225 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5226 spin_lock_irqsave(&sqp->qc_lock, iflags);
5227 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5228 if (test_bit(k, sqp->in_use_bm)) {
5229 sqcp = &sqp->qc_arr[k];
5233 devip = (struct sdebug_dev_info *)
5234 sqcp->a_cmnd->device->hostdata;
5236 atomic_dec(&devip->num_in_q);
5237 sqcp->a_cmnd = NULL;
5238 sd_dp = sqcp->sd_dp;
5240 l_defer_t = READ_ONCE(sd_dp->defer_t);
5241 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5243 l_defer_t = SDEB_DEFER_NONE;
5244 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5245 stop_qc_helper(sd_dp, l_defer_t);
5246 if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
5247 scp->result = DID_NO_CONNECT << 16;
5250 clear_bit(k, sqp->in_use_bm);
5251 spin_lock_irqsave(&sqp->qc_lock, iflags);
5254 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5258 /* Free queued command memory on heap */
5259 static void free_all_queued(void)
5262 struct sdebug_queue *sqp;
5263 struct sdebug_queued_cmd *sqcp;
5265 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5266 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5267 sqcp = &sqp->qc_arr[k];
5274 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5280 ok = stop_queued_cmnd(SCpnt);
5281 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5282 sdev_printk(KERN_INFO, SCpnt->device,
5283 "%s: command%s found\n", __func__,
5289 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5292 if (SCpnt && SCpnt->device) {
5293 struct scsi_device *sdp = SCpnt->device;
5294 struct sdebug_dev_info *devip =
5295 (struct sdebug_dev_info *)sdp->hostdata;
5297 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5298 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5300 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5305 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5307 struct sdebug_host_info *sdbg_host;
5308 struct sdebug_dev_info *devip;
5309 struct scsi_device *sdp;
5310 struct Scsi_Host *hp;
5313 ++num_target_resets;
5316 sdp = SCpnt->device;
5319 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5320 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5324 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5326 list_for_each_entry(devip,
5327 &sdbg_host->dev_info_list,
5329 if (devip->target == sdp->id) {
5330 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5334 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5335 sdev_printk(KERN_INFO, sdp,
5336 "%s: %d device(s) found in target\n", __func__, k);
5341 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5343 struct sdebug_host_info *sdbg_host;
5344 struct sdebug_dev_info *devip;
5345 struct scsi_device *sdp;
5346 struct Scsi_Host *hp;
5350 if (!(SCpnt && SCpnt->device))
5352 sdp = SCpnt->device;
5353 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5354 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5357 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5359 list_for_each_entry(devip,
5360 &sdbg_host->dev_info_list,
5362 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5367 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5368 sdev_printk(KERN_INFO, sdp,
5369 "%s: %d device(s) found in host\n", __func__, k);
5374 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5376 struct sdebug_host_info *sdbg_host;
5377 struct sdebug_dev_info *devip;
5381 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5382 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5383 spin_lock(&sdebug_host_list_lock);
5384 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5385 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5387 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5391 spin_unlock(&sdebug_host_list_lock);
5392 stop_all_queued(false);
5393 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5394 sdev_printk(KERN_INFO, SCpnt->device,
5395 "%s: %d device(s) found\n", __func__, k);
5399 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5401 struct msdos_partition *pp;
5402 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5403 int sectors_per_part, num_sectors, k;
5404 int heads_by_sects, start_sec, end_sec;
5406 /* assume partition table already zeroed */
5407 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5409 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5410 sdebug_num_parts = SDEBUG_MAX_PARTS;
5411 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5413 num_sectors = (int)get_sdebug_capacity();
5414 sectors_per_part = (num_sectors - sdebug_sectors_per)
5416 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5417 starts[0] = sdebug_sectors_per;
5418 max_part_secs = sectors_per_part;
5419 for (k = 1; k < sdebug_num_parts; ++k) {
5420 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5422 if (starts[k] - starts[k - 1] < max_part_secs)
5423 max_part_secs = starts[k] - starts[k - 1];
5425 starts[sdebug_num_parts] = num_sectors;
5426 starts[sdebug_num_parts + 1] = 0;
5428 ramp[510] = 0x55; /* magic partition markings */
5430 pp = (struct msdos_partition *)(ramp + 0x1be);
5431 for (k = 0; starts[k + 1]; ++k, ++pp) {
5432 start_sec = starts[k];
5433 end_sec = starts[k] + max_part_secs - 1;
5436 pp->cyl = start_sec / heads_by_sects;
5437 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5438 / sdebug_sectors_per;
5439 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5441 pp->end_cyl = end_sec / heads_by_sects;
5442 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5443 / sdebug_sectors_per;
5444 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5446 pp->start_sect = cpu_to_le32(start_sec);
5447 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5448 pp->sys_ind = 0x83; /* plain Linux partition */
5452 static void sdeb_block_all_queues(void)
5455 struct sdebug_queue *sqp;
5457 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5458 atomic_set(&sqp->blocked, (int)true);
5461 static void sdeb_unblock_all_queues(void)
5464 struct sdebug_queue *sqp;
5466 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5467 atomic_set(&sqp->blocked, (int)false);
5471 sdeb_add_n_hosts(int num_hosts)
5478 struct sdeb_store_info *sip;
5479 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
5483 xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
5484 sdeb_most_recent_idx = (int)idx;
5488 if (found) /* re-use case */
5489 sdebug_add_host_helper((int)idx);
5491 sdebug_do_add_host(true /* make new store */);
5493 sdebug_do_add_host(false);
5495 } while (--num_hosts);
5498 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5499 * commands will be processed normally before triggers occur.
5501 static void tweak_cmnd_count(void)
5505 modulo = abs(sdebug_every_nth);
5508 sdeb_block_all_queues();
5509 count = atomic_read(&sdebug_cmnd_count);
5510 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5511 sdeb_unblock_all_queues();
5514 static void clear_queue_stats(void)
5516 atomic_set(&sdebug_cmnd_count, 0);
5517 atomic_set(&sdebug_completions, 0);
5518 atomic_set(&sdebug_miss_cpus, 0);
5519 atomic_set(&sdebug_a_tsf, 0);
5522 static bool inject_on_this_cmd(void)
5524 if (sdebug_every_nth == 0)
5526 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5529 static int process_deflect_incoming(struct scsi_cmnd *scp)
5531 u8 opcode = scp->cmnd[0];
5533 if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
5535 return DID_NO_CONNECT << 16;
5538 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5540 /* Complete the processing of the thread that queued a SCSI command to this
5541 * driver. It either completes the command by calling cmnd_done() or
5542 * schedules a hr timer or work queue then returns 0. Returns
5543 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5545 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5547 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
5548 int delta_jiff, int ndelay)
5551 bool inject = false;
5552 bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5553 int k, num_in_q, qdepth;
5554 unsigned long iflags;
5555 u64 ns_from_boot = 0;
5556 struct sdebug_queue *sqp;
5557 struct sdebug_queued_cmd *sqcp;
5558 struct scsi_device *sdp;
5559 struct sdebug_defer *sd_dp;
5561 if (unlikely(devip == NULL)) {
5562 if (scsi_result == 0)
5563 scsi_result = DID_NO_CONNECT << 16;
5564 goto respond_in_thread;
5568 if (delta_jiff == 0) {
5569 sqp = get_queue(cmnd);
5570 if (atomic_read(&sqp->blocked)) {
5571 if (smp_load_acquire(&sdebug_deflect_incoming))
5572 return process_deflect_incoming(cmnd);
5574 return SCSI_MLQUEUE_HOST_BUSY;
5576 goto respond_in_thread;
5579 sqp = get_queue(cmnd);
5580 spin_lock_irqsave(&sqp->qc_lock, iflags);
5581 if (unlikely(atomic_read(&sqp->blocked))) {
5582 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5583 if (smp_load_acquire(&sdebug_deflect_incoming)) {
5584 scsi_result = process_deflect_incoming(cmnd);
5585 goto respond_in_thread;
5588 pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
5589 return SCSI_MLQUEUE_HOST_BUSY;
5591 num_in_q = atomic_read(&devip->num_in_q);
5592 qdepth = cmnd->device->queue_depth;
5593 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5595 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5596 goto respond_in_thread;
5598 scsi_result = device_qfull_result;
5599 } else if (unlikely(sdebug_every_nth &&
5600 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5601 (scsi_result == 0))) {
5602 if ((num_in_q == (qdepth - 1)) &&
5603 (atomic_inc_return(&sdebug_a_tsf) >=
5604 abs(sdebug_every_nth))) {
5605 atomic_set(&sdebug_a_tsf, 0);
5607 scsi_result = device_qfull_result;
5611 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5612 if (unlikely(k >= sdebug_max_queue)) {
5613 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5615 goto respond_in_thread;
5616 scsi_result = device_qfull_result;
5617 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5618 sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5619 __func__, sdebug_max_queue);
5620 goto respond_in_thread;
5622 set_bit(k, sqp->in_use_bm);
5623 atomic_inc(&devip->num_in_q);
5624 sqcp = &sqp->qc_arr[k];
5625 sqcp->a_cmnd = cmnd;
5626 cmnd->host_scribble = (unsigned char *)sqcp;
5627 sd_dp = sqcp->sd_dp;
5628 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5631 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5633 atomic_dec(&devip->num_in_q);
5634 clear_bit(k, sqp->in_use_bm);
5635 return SCSI_MLQUEUE_HOST_BUSY;
5642 /* Set the hostwide tag */
5643 if (sdebug_host_max_queue)
5644 sd_dp->hc_idx = get_tag(cmnd);
5647 ns_from_boot = ktime_get_boottime_ns();
5649 /* one of the resp_*() response functions is called here */
5650 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5651 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5652 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5653 delta_jiff = ndelay = 0;
5655 if (cmnd->result == 0 && scsi_result != 0)
5656 cmnd->result = scsi_result;
5657 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5658 if (atomic_read(&sdeb_inject_pending)) {
5659 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5660 atomic_set(&sdeb_inject_pending, 0);
5661 cmnd->result = check_condition_result;
5665 if (unlikely(sdebug_verbose && cmnd->result))
5666 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5667 __func__, cmnd->result);
5669 if (delta_jiff > 0 || ndelay > 0) {
5672 if (delta_jiff > 0) {
5673 u64 ns = jiffies_to_nsecs(delta_jiff);
5675 if (sdebug_random && ns < U32_MAX) {
5676 ns = prandom_u32_max((u32)ns);
5677 } else if (sdebug_random) {
5678 ns >>= 12; /* scale to 4 usec precision */
5679 if (ns < U32_MAX) /* over 4 hours max */
5680 ns = prandom_u32_max((u32)ns);
5683 kt = ns_to_ktime(ns);
5684 } else { /* ndelay has a 4.2 second max */
5685 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5687 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5688 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5690 if (kt <= d) { /* elapsed duration >= kt */
5691 spin_lock_irqsave(&sqp->qc_lock, iflags);
5692 sqcp->a_cmnd = NULL;
5693 atomic_dec(&devip->num_in_q);
5694 clear_bit(k, sqp->in_use_bm);
5695 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5698 /* call scsi_done() from this thread */
5702 /* otherwise reduce kt by elapsed time */
5707 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5708 spin_lock_irqsave(&sqp->qc_lock, iflags);
5709 if (!sd_dp->init_poll) {
5710 sd_dp->init_poll = true;
5711 sqcp->sd_dp = sd_dp;
5712 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5715 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5716 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5718 if (!sd_dp->init_hrt) {
5719 sd_dp->init_hrt = true;
5720 sqcp->sd_dp = sd_dp;
5721 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5722 HRTIMER_MODE_REL_PINNED);
5723 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5724 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5727 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5728 /* schedule the invocation of scsi_done() for a later time */
5729 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5731 if (sdebug_statistics)
5732 sd_dp->issuing_cpu = raw_smp_processor_id();
5733 } else { /* jdelay < 0, use work queue */
5734 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5735 atomic_read(&sdeb_inject_pending)))
5736 sd_dp->aborted = true;
5738 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5739 spin_lock_irqsave(&sqp->qc_lock, iflags);
5740 if (!sd_dp->init_poll) {
5741 sd_dp->init_poll = true;
5742 sqcp->sd_dp = sd_dp;
5743 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5746 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5747 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5749 if (!sd_dp->init_wq) {
5750 sd_dp->init_wq = true;
5751 sqcp->sd_dp = sd_dp;
5752 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5754 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5756 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5757 schedule_work(&sd_dp->ew.work);
5759 if (sdebug_statistics)
5760 sd_dp->issuing_cpu = raw_smp_processor_id();
5761 if (unlikely(sd_dp->aborted)) {
5762 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5763 scsi_cmd_to_rq(cmnd)->tag);
5764 blk_abort_request(scsi_cmd_to_rq(cmnd));
5765 atomic_set(&sdeb_inject_pending, 0);
5766 sd_dp->aborted = false;
5769 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5770 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5771 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5774 respond_in_thread: /* call back to mid-layer using invocation thread */
5775 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5776 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5777 if (cmnd->result == 0 && scsi_result != 0) {
5778 cmnd->result = scsi_result;
5780 pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
5781 blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
5787 /* Note: The following macros create attribute files in the
5788 /sys/module/scsi_debug/parameters directory. Unfortunately this
5789 driver is unaware of a change and cannot trigger auxiliary actions
5790 as it can when the corresponding attribute in the
5791 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5793 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5794 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5795 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5796 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5797 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5798 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5799 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5800 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5801 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5802 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5803 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5804 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5805 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5806 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5807 module_param_string(inq_product, sdebug_inq_product_id,
5808 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5809 module_param_string(inq_rev, sdebug_inq_product_rev,
5810 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5811 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5812 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5813 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5814 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5815 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5816 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5817 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5818 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5819 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5820 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5821 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5823 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5825 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5826 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5827 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5828 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5829 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5830 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5831 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5832 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5833 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5834 module_param_named(per_host_store, sdebug_per_host_store, bool,
5836 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5837 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5838 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5839 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5840 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5841 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5842 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5843 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5844 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5845 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5846 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5847 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5848 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5849 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5850 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5851 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5852 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5853 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5855 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5856 module_param_named(write_same_length, sdebug_write_same_length, int,
5858 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5859 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5860 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5861 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5863 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5864 MODULE_DESCRIPTION("SCSI debug adapter driver");
5865 MODULE_LICENSE("GPL");
5866 MODULE_VERSION(SDEBUG_VERSION);
5868 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5869 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5870 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5871 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5872 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5873 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5874 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5875 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5876 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5877 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5878 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5879 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5880 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5881 MODULE_PARM_DESC(host_max_queue,
5882 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5883 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5884 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5885 SDEBUG_VERSION "\")");
5886 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5887 MODULE_PARM_DESC(lbprz,
5888 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5889 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5890 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5891 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5892 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5893 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5894 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5895 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5896 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5897 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5898 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5899 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5900 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5901 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5902 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5903 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5904 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5905 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5906 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5907 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5908 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5909 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5910 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5911 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5912 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5913 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5914 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5915 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5916 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5917 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5918 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5919 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5920 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5921 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5922 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5923 MODULE_PARM_DESC(uuid_ctl,
5924 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5925 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5926 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5927 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5928 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5929 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5930 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5931 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5932 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5934 #define SDEBUG_INFO_LEN 256
5935 static char sdebug_info[SDEBUG_INFO_LEN];
5937 static const char *scsi_debug_info(struct Scsi_Host *shp)
5941 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5942 my_name, SDEBUG_VERSION, sdebug_version_date);
5943 if (k >= (SDEBUG_INFO_LEN - 1))
5945 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5946 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5947 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5948 "statistics", (int)sdebug_statistics);
5952 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5953 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5958 int minLen = length > 15 ? 15 : length;
5960 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5962 memcpy(arr, buffer, minLen);
5964 if (1 != sscanf(arr, "%d", &opts))
5967 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5968 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5969 if (sdebug_every_nth != 0)
5974 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5975 * same for each scsi_debug host (if more than one). Some of the counters
5976 * output are not atomics so might be inaccurate in a busy system. */
5977 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5980 struct sdebug_queue *sqp;
5981 struct sdebug_host_info *sdhp;
5983 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5984 SDEBUG_VERSION, sdebug_version_date);
5985 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5986 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5987 sdebug_opts, sdebug_every_nth);
5988 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5989 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5990 sdebug_sector_size, "bytes");
5991 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5992 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5994 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5995 num_dev_resets, num_target_resets, num_bus_resets,
5997 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5998 dix_reads, dix_writes, dif_errors);
5999 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6001 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6002 atomic_read(&sdebug_cmnd_count),
6003 atomic_read(&sdebug_completions),
6004 "miss_cpus", atomic_read(&sdebug_miss_cpus),
6005 atomic_read(&sdebug_a_tsf),
6006 atomic_read(&sdeb_mq_poll_count));
6008 seq_printf(m, "submit_queues=%d\n", submit_queues);
6009 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6010 seq_printf(m, " queue %d:\n", j);
6011 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6012 if (f != sdebug_max_queue) {
6013 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6014 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
6015 "first,last bits", f, l);
6019 seq_printf(m, "this host_no=%d\n", host->host_no);
6020 if (!xa_empty(per_store_ap)) {
6023 unsigned long l_idx;
6024 struct sdeb_store_info *sip;
6026 seq_puts(m, "\nhost list:\n");
6028 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6030 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
6031 sdhp->shost->host_no, idx);
6034 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6035 sdeb_most_recent_idx);
6037 xa_for_each(per_store_ap, l_idx, sip) {
6038 niu = xa_get_mark(per_store_ap, l_idx,
6039 SDEB_XA_NOT_IN_USE);
6041 seq_printf(m, " %d: idx=%d%s\n", j, idx,
6042 (niu ? " not_in_use" : ""));
6049 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6051 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6053 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6054 * of delay is jiffies.
6056 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6061 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6063 if (sdebug_jdelay != jdelay) {
6065 struct sdebug_queue *sqp;
6067 sdeb_block_all_queues();
6068 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6070 k = find_first_bit(sqp->in_use_bm,
6072 if (k != sdebug_max_queue) {
6073 res = -EBUSY; /* queued commands */
6078 sdebug_jdelay = jdelay;
6081 sdeb_unblock_all_queues();
6087 static DRIVER_ATTR_RW(delay);
6089 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6091 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6093 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6094 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6095 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6100 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6101 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6103 if (sdebug_ndelay != ndelay) {
6105 struct sdebug_queue *sqp;
6107 sdeb_block_all_queues();
6108 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6110 k = find_first_bit(sqp->in_use_bm,
6112 if (k != sdebug_max_queue) {
6113 res = -EBUSY; /* queued commands */
6118 sdebug_ndelay = ndelay;
6119 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6122 sdeb_unblock_all_queues();
6128 static DRIVER_ATTR_RW(ndelay);
6130 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6132 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6135 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6141 if (sscanf(buf, "%10s", work) == 1) {
6142 if (strncasecmp(work, "0x", 2) == 0) {
6143 if (kstrtoint(work + 2, 16, &opts) == 0)
6146 if (kstrtoint(work, 10, &opts) == 0)
6153 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6154 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6158 static DRIVER_ATTR_RW(opts);
6160 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6162 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6164 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6169 /* Cannot change from or to TYPE_ZBC with sysfs */
6170 if (sdebug_ptype == TYPE_ZBC)
6173 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6181 static DRIVER_ATTR_RW(ptype);
6183 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6185 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6187 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6192 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6198 static DRIVER_ATTR_RW(dsense);
6200 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6202 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6204 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6209 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6210 bool want_store = (n == 0);
6211 struct sdebug_host_info *sdhp;
6214 sdebug_fake_rw = (sdebug_fake_rw > 0);
6215 if (sdebug_fake_rw == n)
6216 return count; /* not transitioning so do nothing */
6218 if (want_store) { /* 1 --> 0 transition, set up store */
6219 if (sdeb_first_idx < 0) {
6220 idx = sdebug_add_store();
6224 idx = sdeb_first_idx;
6225 xa_clear_mark(per_store_ap, idx,
6226 SDEB_XA_NOT_IN_USE);
6228 /* make all hosts use same store */
6229 list_for_each_entry(sdhp, &sdebug_host_list,
6231 if (sdhp->si_idx != idx) {
6232 xa_set_mark(per_store_ap, sdhp->si_idx,
6233 SDEB_XA_NOT_IN_USE);
6237 sdeb_most_recent_idx = idx;
6238 } else { /* 0 --> 1 transition is trigger for shrink */
6239 sdebug_erase_all_stores(true /* apart from first */);
6246 static DRIVER_ATTR_RW(fake_rw);
6248 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6250 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6252 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6257 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6258 sdebug_no_lun_0 = n;
6263 static DRIVER_ATTR_RW(no_lun_0);
6265 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6267 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6269 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6274 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6275 sdebug_num_tgts = n;
6276 sdebug_max_tgts_luns();
6281 static DRIVER_ATTR_RW(num_tgts);
6283 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6285 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6287 static DRIVER_ATTR_RO(dev_size_mb);
6289 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6291 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6294 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6299 if (kstrtobool(buf, &v))
6302 sdebug_per_host_store = v;
6305 static DRIVER_ATTR_RW(per_host_store);
6307 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6309 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6311 static DRIVER_ATTR_RO(num_parts);
6313 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6315 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6317 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6323 if (sscanf(buf, "%10s", work) == 1) {
6324 if (strncasecmp(work, "0x", 2) == 0) {
6325 if (kstrtoint(work + 2, 16, &nth) == 0)
6326 goto every_nth_done;
6328 if (kstrtoint(work, 10, &nth) == 0)
6329 goto every_nth_done;
6335 sdebug_every_nth = nth;
6336 if (nth && !sdebug_statistics) {
6337 pr_info("every_nth needs statistics=1, set it\n");
6338 sdebug_statistics = true;
6343 static DRIVER_ATTR_RW(every_nth);
6345 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6347 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6349 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6355 if (kstrtoint(buf, 0, &n))
6358 if (n > (int)SAM_LUN_AM_FLAT) {
6359 pr_warn("only LUN address methods 0 and 1 are supported\n");
6362 changed = ((int)sdebug_lun_am != n);
6364 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6365 struct sdebug_host_info *sdhp;
6366 struct sdebug_dev_info *dp;
6368 spin_lock(&sdebug_host_list_lock);
6369 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6370 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6371 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6374 spin_unlock(&sdebug_host_list_lock);
6380 static DRIVER_ATTR_RW(lun_format);
6382 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6384 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6386 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6392 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6394 pr_warn("max_luns can be no more than 256\n");
6397 changed = (sdebug_max_luns != n);
6398 sdebug_max_luns = n;
6399 sdebug_max_tgts_luns();
6400 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6401 struct sdebug_host_info *sdhp;
6402 struct sdebug_dev_info *dp;
6404 spin_lock(&sdebug_host_list_lock);
6405 list_for_each_entry(sdhp, &sdebug_host_list,
6407 list_for_each_entry(dp, &sdhp->dev_info_list,
6409 set_bit(SDEBUG_UA_LUNS_CHANGED,
6413 spin_unlock(&sdebug_host_list_lock);
6419 static DRIVER_ATTR_RW(max_luns);
6421 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6423 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6425 /* N.B. max_queue can be changed while there are queued commands. In flight
6426 * commands beyond the new max_queue will be completed. */
6427 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6431 struct sdebug_queue *sqp;
6433 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6434 (n <= SDEBUG_CANQUEUE) &&
6435 (sdebug_host_max_queue == 0)) {
6436 sdeb_block_all_queues();
6438 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6440 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6444 sdebug_max_queue = n;
6445 if (k == SDEBUG_CANQUEUE)
6446 atomic_set(&retired_max_queue, 0);
6448 atomic_set(&retired_max_queue, k + 1);
6450 atomic_set(&retired_max_queue, 0);
6451 sdeb_unblock_all_queues();
6456 static DRIVER_ATTR_RW(max_queue);
6458 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6460 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6463 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6465 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6468 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6472 if (kstrtobool(buf, &v))
6475 sdebug_no_rwlock = v;
6478 static DRIVER_ATTR_RW(no_rwlock);
6481 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6482 * in range [0, sdebug_host_max_queue), we can't change it.
6484 static DRIVER_ATTR_RO(host_max_queue);
6486 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6488 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6490 static DRIVER_ATTR_RO(no_uld);
6492 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6494 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6496 static DRIVER_ATTR_RO(scsi_level);
6498 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6500 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6502 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6508 /* Ignore capacity change for ZBC drives for now */
6509 if (sdeb_zbc_in_use)
6512 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6513 changed = (sdebug_virtual_gb != n);
6514 sdebug_virtual_gb = n;
6515 sdebug_capacity = get_sdebug_capacity();
6517 struct sdebug_host_info *sdhp;
6518 struct sdebug_dev_info *dp;
6520 spin_lock(&sdebug_host_list_lock);
6521 list_for_each_entry(sdhp, &sdebug_host_list,
6523 list_for_each_entry(dp, &sdhp->dev_info_list,
6525 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6529 spin_unlock(&sdebug_host_list_lock);
6535 static DRIVER_ATTR_RW(virtual_gb);
6537 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6539 /* absolute number of hosts currently active is what is shown */
6540 return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
6544 * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
6545 * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
6546 * Returns -EBUSY if another add_host sysfs invocation is active.
6548 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6553 if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
6556 pr_info("prior num_hosts=%d, num_to_add=%d\n",
6557 atomic_read(&sdebug_num_hosts), delta_hosts);
6558 if (delta_hosts == 0)
6560 if (mutex_trylock(&add_host_mutex) == 0)
6562 if (delta_hosts > 0) {
6563 sdeb_add_n_hosts(delta_hosts);
6564 } else if (delta_hosts < 0) {
6565 smp_store_release(&sdebug_deflect_incoming, true);
6566 sdeb_block_all_queues();
6567 if (delta_hosts >= atomic_read(&sdebug_num_hosts))
6568 stop_all_queued(true);
6570 if (atomic_read(&sdebug_num_hosts) < 1) {
6574 sdebug_do_remove_host(false);
6575 } while (++delta_hosts);
6576 sdeb_unblock_all_queues();
6577 smp_store_release(&sdebug_deflect_incoming, false);
6579 mutex_unlock(&add_host_mutex);
6581 pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
6584 static DRIVER_ATTR_RW(add_host);
6586 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6588 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6590 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6595 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6596 sdebug_vpd_use_hostno = n;
6601 static DRIVER_ATTR_RW(vpd_use_hostno);
6603 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6605 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6607 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6612 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6614 sdebug_statistics = true;
6616 clear_queue_stats();
6617 sdebug_statistics = false;
6623 static DRIVER_ATTR_RW(statistics);
6625 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6627 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6629 static DRIVER_ATTR_RO(sector_size);
6631 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6633 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6635 static DRIVER_ATTR_RO(submit_queues);
6637 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6639 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6641 static DRIVER_ATTR_RO(dix);
6643 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6645 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6647 static DRIVER_ATTR_RO(dif);
6649 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6651 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6653 static DRIVER_ATTR_RO(guard);
6655 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6657 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6659 static DRIVER_ATTR_RO(ato);
6661 static ssize_t map_show(struct device_driver *ddp, char *buf)
6665 if (!scsi_debug_lbp())
6666 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6667 sdebug_store_sectors);
6669 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6670 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6673 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6674 (int)map_size, sip->map_storep);
6676 buf[count++] = '\n';
6681 static DRIVER_ATTR_RO(map);
6683 static ssize_t random_show(struct device_driver *ddp, char *buf)
6685 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6688 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6693 if (kstrtobool(buf, &v))
6699 static DRIVER_ATTR_RW(random);
6701 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6703 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6705 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6710 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6711 sdebug_removable = (n > 0);
6716 static DRIVER_ATTR_RW(removable);
6718 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6720 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6722 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6723 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6728 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6729 sdebug_host_lock = (n > 0);
6734 static DRIVER_ATTR_RW(host_lock);
6736 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6738 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6740 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6745 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6746 sdebug_strict = (n > 0);
6751 static DRIVER_ATTR_RW(strict);
6753 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6755 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6757 static DRIVER_ATTR_RO(uuid_ctl);
6759 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6761 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6763 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6768 ret = kstrtoint(buf, 0, &n);
6772 all_config_cdb_len();
6775 static DRIVER_ATTR_RW(cdb_len);
6777 static const char * const zbc_model_strs_a[] = {
6778 [BLK_ZONED_NONE] = "none",
6779 [BLK_ZONED_HA] = "host-aware",
6780 [BLK_ZONED_HM] = "host-managed",
6783 static const char * const zbc_model_strs_b[] = {
6784 [BLK_ZONED_NONE] = "no",
6785 [BLK_ZONED_HA] = "aware",
6786 [BLK_ZONED_HM] = "managed",
6789 static const char * const zbc_model_strs_c[] = {
6790 [BLK_ZONED_NONE] = "0",
6791 [BLK_ZONED_HA] = "1",
6792 [BLK_ZONED_HM] = "2",
6795 static int sdeb_zbc_model_str(const char *cp)
6797 int res = sysfs_match_string(zbc_model_strs_a, cp);
6800 res = sysfs_match_string(zbc_model_strs_b, cp);
6802 res = sysfs_match_string(zbc_model_strs_c, cp);
6810 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6812 return scnprintf(buf, PAGE_SIZE, "%s\n",
6813 zbc_model_strs_a[sdeb_zbc_model]);
6815 static DRIVER_ATTR_RO(zbc);
6817 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6819 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6821 static DRIVER_ATTR_RO(tur_ms_to_ready);
6823 /* Note: The following array creates attribute files in the
6824 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6825 files (over those found in the /sys/module/scsi_debug/parameters
6826 directory) is that auxiliary actions can be triggered when an attribute
6827 is changed. For example see: add_host_store() above.
6830 static struct attribute *sdebug_drv_attrs[] = {
6831 &driver_attr_delay.attr,
6832 &driver_attr_opts.attr,
6833 &driver_attr_ptype.attr,
6834 &driver_attr_dsense.attr,
6835 &driver_attr_fake_rw.attr,
6836 &driver_attr_host_max_queue.attr,
6837 &driver_attr_no_lun_0.attr,
6838 &driver_attr_num_tgts.attr,
6839 &driver_attr_dev_size_mb.attr,
6840 &driver_attr_num_parts.attr,
6841 &driver_attr_every_nth.attr,
6842 &driver_attr_lun_format.attr,
6843 &driver_attr_max_luns.attr,
6844 &driver_attr_max_queue.attr,
6845 &driver_attr_no_rwlock.attr,
6846 &driver_attr_no_uld.attr,
6847 &driver_attr_scsi_level.attr,
6848 &driver_attr_virtual_gb.attr,
6849 &driver_attr_add_host.attr,
6850 &driver_attr_per_host_store.attr,
6851 &driver_attr_vpd_use_hostno.attr,
6852 &driver_attr_sector_size.attr,
6853 &driver_attr_statistics.attr,
6854 &driver_attr_submit_queues.attr,
6855 &driver_attr_dix.attr,
6856 &driver_attr_dif.attr,
6857 &driver_attr_guard.attr,
6858 &driver_attr_ato.attr,
6859 &driver_attr_map.attr,
6860 &driver_attr_random.attr,
6861 &driver_attr_removable.attr,
6862 &driver_attr_host_lock.attr,
6863 &driver_attr_ndelay.attr,
6864 &driver_attr_strict.attr,
6865 &driver_attr_uuid_ctl.attr,
6866 &driver_attr_cdb_len.attr,
6867 &driver_attr_tur_ms_to_ready.attr,
6868 &driver_attr_zbc.attr,
6871 ATTRIBUTE_GROUPS(sdebug_drv);
6873 static struct device *pseudo_primary;
6875 static int __init scsi_debug_init(void)
6877 bool want_store = (sdebug_fake_rw == 0);
6879 int k, ret, hosts_to_add;
6882 ramdisk_lck_a[0] = &atomic_rw;
6883 ramdisk_lck_a[1] = &atomic_rw2;
6884 atomic_set(&retired_max_queue, 0);
6886 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6887 pr_warn("ndelay must be less than 1 second, ignored\n");
6889 } else if (sdebug_ndelay > 0)
6890 sdebug_jdelay = JDELAY_OVERRIDDEN;
6892 switch (sdebug_sector_size) {
6899 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6903 switch (sdebug_dif) {
6904 case T10_PI_TYPE0_PROTECTION:
6906 case T10_PI_TYPE1_PROTECTION:
6907 case T10_PI_TYPE2_PROTECTION:
6908 case T10_PI_TYPE3_PROTECTION:
6909 have_dif_prot = true;
6913 pr_err("dif must be 0, 1, 2 or 3\n");
6917 if (sdebug_num_tgts < 0) {
6918 pr_err("num_tgts must be >= 0\n");
6922 if (sdebug_guard > 1) {
6923 pr_err("guard must be 0 or 1\n");
6927 if (sdebug_ato > 1) {
6928 pr_err("ato must be 0 or 1\n");
6932 if (sdebug_physblk_exp > 15) {
6933 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6937 sdebug_lun_am = sdebug_lun_am_i;
6938 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6939 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6940 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6943 if (sdebug_max_luns > 256) {
6944 if (sdebug_max_luns > 16384) {
6945 pr_warn("max_luns can be no more than 16384, use default\n");
6946 sdebug_max_luns = DEF_MAX_LUNS;
6948 sdebug_lun_am = SAM_LUN_AM_FLAT;
6951 if (sdebug_lowest_aligned > 0x3fff) {
6952 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6956 if (submit_queues < 1) {
6957 pr_err("submit_queues must be 1 or more\n");
6961 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6962 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6966 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6967 (sdebug_host_max_queue < 0)) {
6968 pr_err("host_max_queue must be in range [0 %d]\n",
6973 if (sdebug_host_max_queue &&
6974 (sdebug_max_queue != sdebug_host_max_queue)) {
6975 sdebug_max_queue = sdebug_host_max_queue;
6976 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6980 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6982 if (sdebug_q_arr == NULL)
6984 for (k = 0; k < submit_queues; ++k)
6985 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6988 * check for host managed zoned block device specified with
6989 * ptype=0x14 or zbc=XXX.
6991 if (sdebug_ptype == TYPE_ZBC) {
6992 sdeb_zbc_model = BLK_ZONED_HM;
6993 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6994 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7000 switch (sdeb_zbc_model) {
7001 case BLK_ZONED_NONE:
7003 sdebug_ptype = TYPE_DISK;
7006 sdebug_ptype = TYPE_ZBC;
7009 pr_err("Invalid ZBC model\n");
7014 if (sdeb_zbc_model != BLK_ZONED_NONE) {
7015 sdeb_zbc_in_use = true;
7016 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7017 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7020 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7021 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7022 if (sdebug_dev_size_mb < 1)
7023 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
7024 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7025 sdebug_store_sectors = sz / sdebug_sector_size;
7026 sdebug_capacity = get_sdebug_capacity();
7028 /* play around with geometry, don't waste too much on track 0 */
7030 sdebug_sectors_per = 32;
7031 if (sdebug_dev_size_mb >= 256)
7033 else if (sdebug_dev_size_mb >= 16)
7035 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7036 (sdebug_sectors_per * sdebug_heads);
7037 if (sdebug_cylinders_per >= 1024) {
7038 /* other LLDs do this; implies >= 1GB ram disk ... */
7040 sdebug_sectors_per = 63;
7041 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7042 (sdebug_sectors_per * sdebug_heads);
7044 if (scsi_debug_lbp()) {
7045 sdebug_unmap_max_blocks =
7046 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7048 sdebug_unmap_max_desc =
7049 clamp(sdebug_unmap_max_desc, 0U, 256U);
7051 sdebug_unmap_granularity =
7052 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7054 if (sdebug_unmap_alignment &&
7055 sdebug_unmap_granularity <=
7056 sdebug_unmap_alignment) {
7057 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7062 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7064 idx = sdebug_add_store();
7071 pseudo_primary = root_device_register("pseudo_0");
7072 if (IS_ERR(pseudo_primary)) {
7073 pr_warn("root_device_register() error\n");
7074 ret = PTR_ERR(pseudo_primary);
7077 ret = bus_register(&pseudo_lld_bus);
7079 pr_warn("bus_register error: %d\n", ret);
7082 ret = driver_register(&sdebug_driverfs_driver);
7084 pr_warn("driver_register error: %d\n", ret);
7088 hosts_to_add = sdebug_add_host;
7089 sdebug_add_host = 0;
7091 for (k = 0; k < hosts_to_add; k++) {
7092 if (smp_load_acquire(&sdebug_deflect_incoming)) {
7093 pr_info("exit early as sdebug_deflect_incoming is set\n");
7096 if (want_store && k == 0) {
7097 ret = sdebug_add_host_helper(idx);
7099 pr_err("add_host_helper k=%d, error=%d\n",
7104 ret = sdebug_do_add_host(want_store &&
7105 sdebug_per_host_store);
7107 pr_err("add_host k=%d error=%d\n", k, -ret);
7113 pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
7116 * Even though all the hosts have been established, due to async device (LU) scanning
7117 * by the scsi mid-level, there may still be devices (LUs) being set up.
7122 bus_unregister(&pseudo_lld_bus);
7124 root_device_unregister(pseudo_primary);
7126 sdebug_erase_store(idx, NULL);
7128 kfree(sdebug_q_arr);
7132 static void __exit scsi_debug_exit(void)
7136 /* Possible race with LUs still being set up; stop them asap */
7137 sdeb_block_all_queues();
7138 smp_store_release(&sdebug_deflect_incoming, true);
7139 stop_all_queued(false);
7140 for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
7141 sdebug_do_remove_host(true);
7144 pr_info("removed %d hosts\n", k);
7145 driver_unregister(&sdebug_driverfs_driver);
7146 bus_unregister(&pseudo_lld_bus);
7147 root_device_unregister(pseudo_primary);
7149 sdebug_erase_all_stores(false);
7150 xa_destroy(per_store_ap);
7151 kfree(sdebug_q_arr);
7154 device_initcall(scsi_debug_init);
7155 module_exit(scsi_debug_exit);
7157 static void sdebug_release_adapter(struct device *dev)
7159 struct sdebug_host_info *sdbg_host;
7161 sdbg_host = to_sdebug_host(dev);
7165 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7166 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7171 if (xa_empty(per_store_ap))
7173 sip = xa_load(per_store_ap, idx);
7177 vfree(sip->map_storep);
7178 vfree(sip->dif_storep);
7180 xa_erase(per_store_ap, idx);
7184 /* Assume apart_from_first==false only in shutdown case. */
7185 static void sdebug_erase_all_stores(bool apart_from_first)
7188 struct sdeb_store_info *sip = NULL;
7190 xa_for_each(per_store_ap, idx, sip) {
7191 if (apart_from_first)
7192 apart_from_first = false;
7194 sdebug_erase_store(idx, sip);
7196 if (apart_from_first)
7197 sdeb_most_recent_idx = sdeb_first_idx;
7201 * Returns store xarray new element index (idx) if >=0 else negated errno.
7202 * Limit the number of stores to 65536.
7204 static int sdebug_add_store(void)
7208 unsigned long iflags;
7209 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7210 struct sdeb_store_info *sip = NULL;
7211 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7213 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7217 xa_lock_irqsave(per_store_ap, iflags);
7218 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7219 if (unlikely(res < 0)) {
7220 xa_unlock_irqrestore(per_store_ap, iflags);
7222 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7225 sdeb_most_recent_idx = n_idx;
7226 if (sdeb_first_idx < 0)
7227 sdeb_first_idx = n_idx;
7228 xa_unlock_irqrestore(per_store_ap, iflags);
7231 sip->storep = vzalloc(sz);
7233 pr_err("user data oom\n");
7236 if (sdebug_num_parts > 0)
7237 sdebug_build_parts(sip->storep, sz);
7239 /* DIF/DIX: what T10 calls Protection Information (PI) */
7243 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7244 sip->dif_storep = vmalloc(dif_size);
7246 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7249 if (!sip->dif_storep) {
7250 pr_err("DIX oom\n");
7253 memset(sip->dif_storep, 0xff, dif_size);
7255 /* Logical Block Provisioning */
7256 if (scsi_debug_lbp()) {
7257 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7258 sip->map_storep = vmalloc(array_size(sizeof(long),
7259 BITS_TO_LONGS(map_size)));
7261 pr_info("%lu provisioning blocks\n", map_size);
7263 if (!sip->map_storep) {
7264 pr_err("LBP map oom\n");
7268 bitmap_zero(sip->map_storep, map_size);
7270 /* Map first 1KB for partition table */
7271 if (sdebug_num_parts)
7272 map_region(sip, 0, 2);
7275 rwlock_init(&sip->macc_lck);
7278 sdebug_erase_store((int)n_idx, sip);
7279 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7283 static int sdebug_add_host_helper(int per_host_idx)
7285 int k, devs_per_host, idx;
7286 int error = -ENOMEM;
7287 struct sdebug_host_info *sdbg_host;
7288 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7290 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7293 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7294 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7295 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7296 sdbg_host->si_idx = idx;
7298 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7300 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7301 for (k = 0; k < devs_per_host; k++) {
7302 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7307 spin_lock(&sdebug_host_list_lock);
7308 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7309 spin_unlock(&sdebug_host_list_lock);
7311 sdbg_host->dev.bus = &pseudo_lld_bus;
7312 sdbg_host->dev.parent = pseudo_primary;
7313 sdbg_host->dev.release = &sdebug_release_adapter;
7314 dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
7316 error = device_register(&sdbg_host->dev);
7320 atomic_inc(&sdebug_num_hosts);
7324 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7326 list_del(&sdbg_devinfo->dev_list);
7327 kfree(sdbg_devinfo->zstate);
7328 kfree(sdbg_devinfo);
7331 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7335 static int sdebug_do_add_host(bool mk_new_store)
7337 int ph_idx = sdeb_most_recent_idx;
7340 ph_idx = sdebug_add_store();
7344 return sdebug_add_host_helper(ph_idx);
7347 static void sdebug_do_remove_host(bool the_end)
7350 struct sdebug_host_info *sdbg_host = NULL;
7351 struct sdebug_host_info *sdbg_host2;
7353 spin_lock(&sdebug_host_list_lock);
7354 if (!list_empty(&sdebug_host_list)) {
7355 sdbg_host = list_entry(sdebug_host_list.prev,
7356 struct sdebug_host_info, host_list);
7357 idx = sdbg_host->si_idx;
7359 if (!the_end && idx >= 0) {
7362 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7363 if (sdbg_host2 == sdbg_host)
7365 if (idx == sdbg_host2->si_idx) {
7371 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7372 if (idx == sdeb_most_recent_idx)
7373 --sdeb_most_recent_idx;
7377 list_del(&sdbg_host->host_list);
7378 spin_unlock(&sdebug_host_list_lock);
7383 device_unregister(&sdbg_host->dev);
7384 atomic_dec(&sdebug_num_hosts);
7387 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7390 struct sdebug_dev_info *devip;
7392 sdeb_block_all_queues();
7393 devip = (struct sdebug_dev_info *)sdev->hostdata;
7394 if (NULL == devip) {
7395 sdeb_unblock_all_queues();
7398 num_in_q = atomic_read(&devip->num_in_q);
7400 if (qdepth > SDEBUG_CANQUEUE) {
7401 qdepth = SDEBUG_CANQUEUE;
7402 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7403 qdepth, SDEBUG_CANQUEUE);
7407 if (qdepth != sdev->queue_depth)
7408 scsi_change_queue_depth(sdev, qdepth);
7410 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7411 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7412 __func__, qdepth, num_in_q);
7414 sdeb_unblock_all_queues();
7415 return sdev->queue_depth;
7418 static bool fake_timeout(struct scsi_cmnd *scp)
7420 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7421 if (sdebug_every_nth < -1)
7422 sdebug_every_nth = -1;
7423 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7424 return true; /* ignore command causing timeout */
7425 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7426 scsi_medium_access_command(scp))
7427 return true; /* time out reads and writes */
7432 /* Response to TUR or media access command when device stopped */
7433 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7437 ktime_t now_ts = ktime_get_boottime();
7438 struct scsi_device *sdp = scp->device;
7440 stopped_state = atomic_read(&devip->stopped);
7441 if (stopped_state == 2) {
7442 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7443 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7444 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7445 /* tur_ms_to_ready timer extinguished */
7446 atomic_set(&devip->stopped, 0);
7450 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7452 sdev_printk(KERN_INFO, sdp,
7453 "%s: Not ready: in process of becoming ready\n", my_name);
7454 if (scp->cmnd[0] == TEST_UNIT_READY) {
7455 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7457 if (diff_ns <= tur_nanosecs_to_ready)
7458 diff_ns = tur_nanosecs_to_ready - diff_ns;
7460 diff_ns = tur_nanosecs_to_ready;
7461 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7462 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7463 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7465 return check_condition_result;
7468 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7470 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7472 return check_condition_result;
7475 static int sdebug_map_queues(struct Scsi_Host *shost)
7479 if (shost->nr_hw_queues == 1)
7482 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7483 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7487 if (i == HCTX_TYPE_DEFAULT)
7488 map->nr_queues = submit_queues - poll_queues;
7489 else if (i == HCTX_TYPE_POLL)
7490 map->nr_queues = poll_queues;
7492 if (!map->nr_queues) {
7493 BUG_ON(i == HCTX_TYPE_DEFAULT);
7497 map->queue_offset = qoff;
7498 blk_mq_map_queues(map);
7500 qoff += map->nr_queues;
7507 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7510 bool retiring = false;
7511 int num_entries = 0;
7512 unsigned int qc_idx = 0;
7513 unsigned long iflags;
7514 ktime_t kt_from_boot = ktime_get_boottime();
7515 struct sdebug_queue *sqp;
7516 struct sdebug_queued_cmd *sqcp;
7517 struct scsi_cmnd *scp;
7518 struct sdebug_dev_info *devip;
7519 struct sdebug_defer *sd_dp;
7521 sqp = sdebug_q_arr + queue_num;
7523 spin_lock_irqsave(&sqp->qc_lock, iflags);
7525 qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7526 if (qc_idx >= sdebug_max_queue)
7529 for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7532 if (!test_bit(qc_idx, sqp->in_use_bm))
7535 qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7537 if (qc_idx >= sdebug_max_queue)
7540 sqcp = &sqp->qc_arr[qc_idx];
7541 sd_dp = sqcp->sd_dp;
7542 if (unlikely(!sd_dp))
7545 if (unlikely(scp == NULL)) {
7546 pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7547 queue_num, qc_idx, __func__);
7550 if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7551 if (kt_from_boot < sd_dp->cmpl_ts)
7554 } else /* ignoring non REQ_POLLED requests */
7556 devip = (struct sdebug_dev_info *)scp->device->hostdata;
7558 atomic_dec(&devip->num_in_q);
7560 pr_err("devip=NULL from %s\n", __func__);
7561 if (unlikely(atomic_read(&retired_max_queue) > 0))
7564 sqcp->a_cmnd = NULL;
7565 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7566 pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7567 sqp, queue_num, qc_idx, __func__);
7570 if (unlikely(retiring)) { /* user has reduced max_queue */
7573 retval = atomic_read(&retired_max_queue);
7574 if (qc_idx >= retval) {
7575 pr_err("index %d too large\n", retval);
7578 k = find_last_bit(sqp->in_use_bm, retval);
7579 if ((k < sdebug_max_queue) || (k == retval))
7580 atomic_set(&retired_max_queue, 0);
7582 atomic_set(&retired_max_queue, k + 1);
7584 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7585 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7586 scsi_done(scp); /* callback to mid level */
7588 spin_lock_irqsave(&sqp->qc_lock, iflags);
7589 if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7594 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7596 if (num_entries > 0)
7597 atomic_add(num_entries, &sdeb_mq_poll_count);
7601 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7602 struct scsi_cmnd *scp)
7605 struct scsi_device *sdp = scp->device;
7606 const struct opcode_info_t *oip;
7607 const struct opcode_info_t *r_oip;
7608 struct sdebug_dev_info *devip;
7609 u8 *cmd = scp->cmnd;
7610 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7611 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7614 u64 lun_index = sdp->lun & 0x3FFF;
7621 scsi_set_resid(scp, 0);
7622 if (sdebug_statistics) {
7623 atomic_inc(&sdebug_cmnd_count);
7624 inject_now = inject_on_this_cmd();
7628 if (unlikely(sdebug_verbose &&
7629 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7634 sb = (int)sizeof(b);
7636 strcpy(b, "too long, over 32 bytes");
7638 for (k = 0, n = 0; k < len && n < sb; ++k)
7639 n += scnprintf(b + n, sb - n, "%02x ",
7642 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7643 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7645 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7646 return SCSI_MLQUEUE_HOST_BUSY;
7647 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7648 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7651 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7652 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7653 devip = (struct sdebug_dev_info *)sdp->hostdata;
7654 if (unlikely(!devip)) {
7655 devip = find_build_dev_info(sdp);
7659 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7660 atomic_set(&sdeb_inject_pending, 1);
7662 na = oip->num_attached;
7664 if (na) { /* multiple commands with this opcode */
7666 if (FF_SA & r_oip->flags) {
7667 if (F_SA_LOW & oip->flags)
7670 sa = get_unaligned_be16(cmd + 8);
7671 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7672 if (opcode == oip->opcode && sa == oip->sa)
7675 } else { /* since no service action only check opcode */
7676 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7677 if (opcode == oip->opcode)
7682 if (F_SA_LOW & r_oip->flags)
7683 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7684 else if (F_SA_HIGH & r_oip->flags)
7685 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7687 mk_sense_invalid_opcode(scp);
7690 } /* else (when na==0) we assume the oip is a match */
7692 if (unlikely(F_INV_OP & flags)) {
7693 mk_sense_invalid_opcode(scp);
7696 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7698 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7699 my_name, opcode, " supported for wlun");
7700 mk_sense_invalid_opcode(scp);
7703 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7707 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7708 rem = ~oip->len_mask[k] & cmd[k];
7710 for (j = 7; j >= 0; --j, rem <<= 1) {
7714 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7719 if (unlikely(!(F_SKIP_UA & flags) &&
7720 find_first_bit(devip->uas_bm,
7721 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7722 errsts = make_ua(scp, devip);
7726 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7727 atomic_read(&devip->stopped))) {
7728 errsts = resp_not_ready(scp, devip);
7732 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7734 if (unlikely(sdebug_every_nth)) {
7735 if (fake_timeout(scp))
7736 return 0; /* ignore command: make trouble */
7738 if (likely(oip->pfp))
7739 pfp = oip->pfp; /* calls a resp_* function */
7741 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7744 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7745 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7746 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7747 sdebug_ndelay > 10000)) {
7749 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7750 * for Start Stop Unit (SSU) want at least 1 second delay and
7751 * if sdebug_jdelay>1 want a long delay of that many seconds.
7752 * For Synchronize Cache want 1/20 of SSU's delay.
7754 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7755 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7757 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7758 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7760 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7763 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7765 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7768 static struct scsi_host_template sdebug_driver_template = {
7769 .show_info = scsi_debug_show_info,
7770 .write_info = scsi_debug_write_info,
7771 .proc_name = sdebug_proc_name,
7772 .name = "SCSI DEBUG",
7773 .info = scsi_debug_info,
7774 .slave_alloc = scsi_debug_slave_alloc,
7775 .slave_configure = scsi_debug_slave_configure,
7776 .slave_destroy = scsi_debug_slave_destroy,
7777 .ioctl = scsi_debug_ioctl,
7778 .queuecommand = scsi_debug_queuecommand,
7779 .change_queue_depth = sdebug_change_qdepth,
7780 .map_queues = sdebug_map_queues,
7781 .mq_poll = sdebug_blk_mq_poll,
7782 .eh_abort_handler = scsi_debug_abort,
7783 .eh_device_reset_handler = scsi_debug_device_reset,
7784 .eh_target_reset_handler = scsi_debug_target_reset,
7785 .eh_bus_reset_handler = scsi_debug_bus_reset,
7786 .eh_host_reset_handler = scsi_debug_host_reset,
7787 .can_queue = SDEBUG_CANQUEUE,
7789 .sg_tablesize = SG_MAX_SEGMENTS,
7790 .cmd_per_lun = DEF_CMD_PER_LUN,
7792 .max_segment_size = -1U,
7793 .module = THIS_MODULE,
7794 .track_queue_depth = 1,
7797 static int sdebug_driver_probe(struct device *dev)
7800 struct sdebug_host_info *sdbg_host;
7801 struct Scsi_Host *hpnt;
7804 sdbg_host = to_sdebug_host(dev);
7806 sdebug_driver_template.can_queue = sdebug_max_queue;
7807 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7808 if (!sdebug_clustering)
7809 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7811 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7813 pr_err("scsi_host_alloc failed\n");
7817 if (submit_queues > nr_cpu_ids) {
7818 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7819 my_name, submit_queues, nr_cpu_ids);
7820 submit_queues = nr_cpu_ids;
7823 * Decide whether to tell scsi subsystem that we want mq. The
7824 * following should give the same answer for each host.
7826 hpnt->nr_hw_queues = submit_queues;
7827 if (sdebug_host_max_queue)
7828 hpnt->host_tagset = 1;
7830 /* poll queues are possible for nr_hw_queues > 1 */
7831 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7832 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7833 my_name, poll_queues, hpnt->nr_hw_queues);
7838 * Poll queues don't need interrupts, but we need at least one I/O queue
7839 * left over for non-polled I/O.
7840 * If condition not met, trim poll_queues to 1 (just for simplicity).
7842 if (poll_queues >= submit_queues) {
7843 if (submit_queues < 3)
7844 pr_warn("%s: trim poll_queues to 1\n", my_name);
7846 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7847 my_name, submit_queues - 1);
7853 sdbg_host->shost = hpnt;
7854 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7855 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7856 hpnt->max_id = sdebug_num_tgts + 1;
7858 hpnt->max_id = sdebug_num_tgts;
7859 /* = sdebug_max_luns; */
7860 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7864 switch (sdebug_dif) {
7866 case T10_PI_TYPE1_PROTECTION:
7867 hprot = SHOST_DIF_TYPE1_PROTECTION;
7869 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7872 case T10_PI_TYPE2_PROTECTION:
7873 hprot = SHOST_DIF_TYPE2_PROTECTION;
7875 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7878 case T10_PI_TYPE3_PROTECTION:
7879 hprot = SHOST_DIF_TYPE3_PROTECTION;
7881 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7886 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7890 scsi_host_set_prot(hpnt, hprot);
7892 if (have_dif_prot || sdebug_dix)
7893 pr_info("host protection%s%s%s%s%s%s%s\n",
7894 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7895 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7896 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7897 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7898 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7899 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7900 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7902 if (sdebug_guard == 1)
7903 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7905 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7907 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7908 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7909 if (sdebug_every_nth) /* need stats counters for every_nth */
7910 sdebug_statistics = true;
7911 error = scsi_add_host(hpnt, &sdbg_host->dev);
7913 pr_err("scsi_add_host failed\n");
7915 scsi_host_put(hpnt);
7917 scsi_scan_host(hpnt);
7923 static void sdebug_driver_remove(struct device *dev)
7925 struct sdebug_host_info *sdbg_host;
7926 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7928 sdbg_host = to_sdebug_host(dev);
7930 scsi_remove_host(sdbg_host->shost);
7932 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7934 list_del(&sdbg_devinfo->dev_list);
7935 kfree(sdbg_devinfo->zstate);
7936 kfree(sdbg_devinfo);
7939 scsi_host_put(sdbg_host->shost);
7942 static int pseudo_lld_bus_match(struct device *dev,
7943 struct device_driver *dev_driver)
7948 static struct bus_type pseudo_lld_bus = {
7950 .match = pseudo_lld_bus_match,
7951 .probe = sdebug_driver_probe,
7952 .remove = sdebug_driver_remove,
7953 .drv_groups = sdebug_drv_groups,