// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
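/* Illustrative example (not part of the original comment): loading the
 * module with no arguments, e.g. "modprobe scsi_debug", therefore yields a
 * single 8 MiB RAM-backed disk at some h:0:0:0; something like
 * "modprobe scsi_debug num_tgts=2 max_luns=4" would yield 8 devices.
 * The parameter names match the module_param definitions in this file.
 */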
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7
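/* For example, if both SDEBUG_UA_POR (bit 0) and SDEBUG_UA_LUNS_CHANGED
 * (bit 4) are pending in a device's uas_bm, find_first_bit() in make_ua()
 * below returns 0, so the power-on/reset UA is reported first. */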
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 * or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  255
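/* Worked example: on a 64-bit build BITS_PER_LONG is 64, so SDEBUG_CANQUEUE
 * is 3 * 64 = 192 queued commands per submit queue; a 32-bit build would
 * give 3 * 32 = 96. */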
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC5_FULL		= 0xe,
};
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};
struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
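/* Illustrative sketch (not a function from this file): a handler for a cdb
 * with the IMMED bit set could end with
 *	return some_result | SDEG_RES_IMMED_MASK;
 * so the command is completed promptly while the simulated operation
 * (e.g. a START STOP UNIT spin-up) is still notionally in progress. */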
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
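/* Example: MODE SENSE(10) (0x5a) and MODE SENSE(6) (0x1a) both map to
 * SDEB_I_MODE_SENSE; the 10 byte form lives in opcode_info_arr[] below
 * while the 6 byte form is reached via the msense_iarr[] overflow array. */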
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },

	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
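/* Example lookup (illustrative): for cdb[0] == 0x28 (READ(10)),
 * opcode_ind_arr[0x28] gives SDEB_I_READ; opcode_info_arr[SDEB_I_READ] is
 * the READ(16) entry (opcode 0x88), so its read_iarr overflow array is
 * scanned, opcode 0x28 matches and resp_read_dt0() handles the command. */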
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;
static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};
static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
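/* For example, loading with "modprobe scsi_debug lbpu=1" (fake_rw left at
 * its default of 0) makes scsi_debug_lbp() return true, so the UNMAP and
 * WRITE SAME paths maintain the provisioning map; with fake_rw=1 that
 * bookkeeping is skipped for speed. */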
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
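/* Worked example (fixed format, sdebug_dsense=0): flagging cdb byte 2,
 * bit 7 sets sks[] to 0xcf 0x00 0x02 (SKSV=1, C/D=1, BPV=1, bit 7, field
 * pointer 2), copied into sense bytes 15..17, with sense key ILLEGAL
 * REQUEST (0x5) and ASC 0x24 already placed by scsi_build_sense_buffer(). */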
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}
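/* Worked example: with a 512 byte data-in buffer, copying 96 bytes at
 * off_dst 64 gives n = 512 - (64 + 96) = 352, so resid can only shrink
 * toward 352; the min_t() keeps a smaller resid from an earlier call. */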
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22, 0x22, 0x22, 0x0, 0xbb, 0x0,
    0x22, 0x22, 0x22, 0x0, 0xbb, 0x1,
    0x22, 0x22, 0x22, 0x0, 0xbb, 0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */

	return sizeof(vpdb0_data);
}
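/* Worked example: with physblk_exp=3 and opt_xferlen_exp left at 0, the
 * else branch gives gran = 1 << 3 = 8, so bytes 2..3 of the page report
 * an optimal transfer length granularity of 8 logical blocks. */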
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}
/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}
/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	return 0x3c;
}
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				(devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
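		/* Worked example (host_no 0, target 0, lun 0, default
		 * vpd_use_hostno=1): port_group_id = (1 << 8) + 0 = 0x100,
		 * lu_id_num = 2000 and target_dev_id = 1997, so the unit
		 * serial number page below reports "2000". */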
1597 if (0 == cmd[2]) { /* supported vital product data pages */
1598 arr[1] = cmd[2]; /*sanity */
1600 arr[n++] = 0x0; /* this page */
1601 arr[n++] = 0x80; /* unit serial number */
1602 arr[n++] = 0x83; /* device identification */
1603 arr[n++] = 0x84; /* software interface ident. */
1604 arr[n++] = 0x85; /* management network addresses */
1605 arr[n++] = 0x86; /* extended inquiry */
1606 arr[n++] = 0x87; /* mode page policy */
1607 arr[n++] = 0x88; /* SCSI ports */
1608 if (is_disk_zbc) { /* SBC or ZBC */
1609 arr[n++] = 0x89; /* ATA information */
1610 arr[n++] = 0xb0; /* Block limits */
1611 arr[n++] = 0xb1; /* Block characteristics */
1613 arr[n++] = 0xb2; /* LB Provisioning */
1615 arr[n++] = 0xb6; /* ZB dev. char. */
1617 arr[3] = n - 4; /* number of supported VPD pages */
1618 } else if (0x80 == cmd[2]) { /* unit serial number */
1619 arr[1] = cmd[2]; /*sanity */
1621 memcpy(&arr[4], lu_id_str, len);
1622 } else if (0x83 == cmd[2]) { /* device identification */
1623 arr[1] = cmd[2]; /*sanity */
1624 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1625 target_dev_id, lu_id_num,
1628 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1629 arr[1] = cmd[2]; /*sanity */
1630 arr[3] = inquiry_vpd_84(&arr[4]);
1631 } else if (0x85 == cmd[2]) { /* Management network addresses */
1632 arr[1] = cmd[2]; /*sanity */
1633 arr[3] = inquiry_vpd_85(&arr[4]);
1634 } else if (0x86 == cmd[2]) { /* extended inquiry */
1635 arr[1] = cmd[2]; /*sanity */
1636 arr[3] = 0x3c; /* number of following entries */
1637 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1638 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1639 else if (have_dif_prot)
1640 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1642 arr[4] = 0x0; /* no protection stuff */
1643 arr[5] = 0x7; /* head of q, ordered + simple q's */
1644 } else if (0x87 == cmd[2]) { /* mode page policy */
1645 arr[1] = cmd[2]; /*sanity */
1646 arr[3] = 0x8; /* number of following entries */
1647 arr[4] = 0x2; /* disconnect-reconnect mp */
1648 arr[6] = 0x80; /* mlus, shared */
1649 arr[8] = 0x18; /* protocol specific lu */
1650 arr[10] = 0x82; /* mlus, per initiator port */
1651 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1652 arr[1] = cmd[2]; /*sanity */
1653 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1654 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1655 arr[1] = cmd[2]; /*sanity */
1656 n = inquiry_vpd_89(&arr[4]);
1657 put_unaligned_be16(n, arr + 2);
1658 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1659 arr[1] = cmd[2]; /*sanity */
1660 arr[3] = inquiry_vpd_b0(&arr[4]);
1661 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1662 arr[1] = cmd[2]; /*sanity */
1663 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1664 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1665 arr[1] = cmd[2]; /*sanity */
1666 arr[3] = inquiry_vpd_b2(&arr[4]);
1667 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1668 arr[1] = cmd[2]; /*sanity */
1669 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1671 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1673 return check_condition_result;
1675 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1676 ret = fill_from_dev_buffer(scp, arr,
1677 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1681 /* drops through here for a standard inquiry */
1682 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1683 arr[2] = sdebug_scsi_level;
1684 arr[3] = 2; /* response_data_format==2 */
1685 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1686 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1687 if (sdebug_vpd_use_hostno == 0)
1688 arr[5] |= 0x10; /* claim: implicit TPGS */
1689 arr[6] = 0x10; /* claim: MultiP */
1690 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1691 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1692 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1693 memcpy(&arr[16], sdebug_inq_product_id, 16);
1694 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1695 /* Use Vendor Specific area to place driver date in ASCII */
1696 memcpy(&arr[36], sdebug_version_date, 8);
1697 /* version descriptors (2 bytes each) follow */
1698 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1699 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1701 if (is_disk) { /* SBC-4 no version claimed */
1702 put_unaligned_be16(0x600, arr + n);
1704 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1705 put_unaligned_be16(0x525, arr + n);
1707 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1708 put_unaligned_be16(0x624, arr + n);
1711 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1712 ret = fill_from_dev_buffer(scp, arr,
1713 min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
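/*
 * Hedged sketch (guarded out, not part of the driver): the fixed header
 * of the standard INQUIRY response assembled above, as an initiator
 * might model it. The struct name and field names are hypothetical.
 */
#if 0
struct std_inquiry_hdr {
	u8 pq_pdt;		/* byte 0: peripheral qualifier + device type */
	u8 rmb;			/* byte 1: bit 7 set => removable medium */
	u8 version;		/* byte 2: sdebug_scsi_level */
	u8 resp_fmt;		/* byte 3: response data format, always 2 here */
	u8 add_len;		/* byte 4: additional length = total - 5 */
	u8 flags1;		/* byte 5: PROTECT bit 0, TPGS bits 4-5 */
	u8 flags2;		/* byte 6: MultiP bit 4, EncServ bit 6 */
	u8 flags3;		/* byte 7: CmdQue bit 1, Linked bit 3 */
	u8 vendor_id[8];	/* bytes 8-15 */
	u8 product_id[16];	/* bytes 16-31 */
	u8 product_rev[4];	/* bytes 32-35 */
};
#endif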
1718 /* See resp_iec_m_pg() for how this data is manipulated */
1719 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1722 static int resp_requests(struct scsi_cmnd *scp,
1723 struct sdebug_dev_info *devip)
1725 unsigned char *cmd = scp->cmnd;
1726 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1727 bool dsense = !!(cmd[1] & 1);
1728 int alloc_len = cmd[4];
1730 int stopped_state = atomic_read(&devip->stopped);
1732 memset(arr, 0, sizeof(arr));
1733 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1737 arr[2] = LOGICAL_UNIT_NOT_READY;
1738 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1742 arr[2] = NOT_READY; /* NOT_READY in sense_key */
1743 arr[7] = 0xa; /* 18 byte sense buffer */
1744 arr[12] = LOGICAL_UNIT_NOT_READY;
1745 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1747 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1748 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1751 arr[1] = 0x0; /* NO_SENSE in sense_key */
1752 arr[2] = THRESHOLD_EXCEEDED;
1753 arr[3] = 0xff; /* Failure prediction (false) */
1757 arr[2] = 0x0; /* NO_SENSE in sense_key */
1758 arr[7] = 0xa; /* 18 byte sense buffer */
1759 arr[12] = THRESHOLD_EXCEEDED;
1760 arr[13] = 0xff; /* Failure prediction (false) */
1762 } else { /* nothing to report */
1765 memset(arr, 0, len);
1768 memset(arr, 0, len);
1773 return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
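/*
 * The two self-report layouts used above differ in field placement:
 * descriptor format (arr[0] == 0x72) keeps the sense key in byte 1 and
 * ASC/ASCQ in bytes 2 and 3; fixed format (arr[0] == 0x70) keeps the
 * sense key in byte 2, the additional length in byte 7 and ASC/ASCQ in
 * bytes 12 and 13.
 */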
1776 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1778 unsigned char *cmd = scp->cmnd;
1779 int power_cond, want_stop, stopped_state;
1782 power_cond = (cmd[4] & 0xf0) >> 4;
1784 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1785 return check_condition_result;
1787 want_stop = !(cmd[4] & 1);
1788 stopped_state = atomic_read(&devip->stopped);
1789 if (stopped_state == 2) {
1790 ktime_t now_ts = ktime_get_boottime();
1792 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1793 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1795 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1796 /* tur_ms_to_ready timer extinguished */
1797 atomic_set(&devip->stopped, 0);
1801 if (stopped_state == 2) {
1803 stopped_state = 1; /* dummy up success */
1804 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1805 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1806 return check_condition_result;
1810 changing = (stopped_state != want_stop);
1812 atomic_xchg(&devip->stopped, want_stop);
1813 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1814 return SDEG_RES_IMMED_MASK;
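/*
 * Hedged sketch (guarded out): the readiness test above lifted into a
 * standalone predicate. The helper name is hypothetical; create_ts and
 * sdeb_tur_ms_to_ready are the driver's own.
 */
#if 0
static bool sdeb_tur_delay_elapsed(const struct sdebug_dev_info *devip)
{
	u64 diff_ns = ktime_to_ns(ktime_sub(ktime_get_boottime(),
					    devip->create_ts));

	return diff_ns >= (u64)sdeb_tur_ms_to_ready * NSEC_PER_MSEC;
}
#endif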
1819 static sector_t get_sdebug_capacity(void)
1821 static const unsigned int gibibyte = 1073741824;
1823 if (sdebug_virtual_gb > 0)
1824 return (sector_t)sdebug_virtual_gb *
1825 (gibibyte / sdebug_sector_size);
1827 return sdebug_store_sectors;
1830 #define SDEBUG_READCAP_ARR_SZ 8
1831 static int resp_readcap(struct scsi_cmnd *scp,
1832 struct sdebug_dev_info *devip)
1834 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1837 /* following just in case virtual_gb changed */
1838 sdebug_capacity = get_sdebug_capacity();
1839 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1840 if (sdebug_capacity < 0xffffffff) {
1841 capac = (unsigned int)sdebug_capacity - 1;
1842 put_unaligned_be32(capac, arr + 0);
1844 put_unaligned_be32(0xffffffff, arr + 0);
1845 put_unaligned_be16(sdebug_sector_size, arr + 6);
1846 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
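/*
 * READ CAPACITY(10) reports the highest LBA, hence capacity - 1 above;
 * 0xffffffff is the defined overflow marker telling the initiator to
 * retry with READ CAPACITY(16).
 */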
1849 #define SDEBUG_READCAP16_ARR_SZ 32
1850 static int resp_readcap16(struct scsi_cmnd *scp,
1851 struct sdebug_dev_info *devip)
1853 unsigned char *cmd = scp->cmnd;
1854 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1857 alloc_len = get_unaligned_be32(cmd + 10);
1858 /* following just in case virtual_gb changed */
1859 sdebug_capacity = get_sdebug_capacity();
1860 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1861 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1862 put_unaligned_be32(sdebug_sector_size, arr + 8);
1863 arr[13] = sdebug_physblk_exp & 0xf;
1864 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1866 if (scsi_debug_lbp()) {
1867 arr[14] |= 0x80; /* LBPME */
1868 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1869 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1870 * in the wider field maps to 0 in this field.
1872 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1876 arr[15] = sdebug_lowest_aligned & 0xff;
1878 if (have_dif_prot) {
1879 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1880 arr[12] |= 1; /* PROT_EN */
1883 return fill_from_dev_buffer(scp, arr,
1884 min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
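/*
 * Worked example of the LOWEST ALIGNED LBA split above: the value
 * straddles bytes 14 and 15, so sdebug_lowest_aligned == 0x1234 yields
 * arr[14] |= 0x12 and arr[15] = 0x34.
 */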
1887 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1889 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1890 struct sdebug_dev_info *devip)
1892 unsigned char *cmd = scp->cmnd;
1894 int host_no = devip->sdbg_host->shost->host_no;
1895 int n, ret, alen, rlen;
1896 int port_group_a, port_group_b, port_a, port_b;
1898 alen = get_unaligned_be32(cmd + 6);
1899 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1901 return DID_REQUEUE << 16;
1903 * EVPD page 0x88 states we have two ports, one
1904 * real and a fake port with no device connected.
1905 * So we create two port groups with one port each
1906 * and set the group with port B to unavailable.
1908 port_a = 0x1; /* relative port A */
1909 port_b = 0x2; /* relative port B */
1910 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1911 (devip->channel & 0x7f);
1912 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1913 (devip->channel & 0x7f) + 0x80;
1916 * The asymmetric access state is cycled according to the host number (host_no).
1919 if (sdebug_vpd_use_hostno == 0) {
1920 arr[n++] = host_no % 3; /* Asymm access state */
1921 arr[n++] = 0x0F; /* claim: all states are supported */
1923 arr[n++] = 0x0; /* Active/Optimized path */
1924 arr[n++] = 0x01; /* only support active/optimized paths */
1926 put_unaligned_be16(port_group_a, arr + n);
1928 arr[n++] = 0; /* Reserved */
1929 arr[n++] = 0; /* Status code */
1930 arr[n++] = 0; /* Vendor unique */
1931 arr[n++] = 0x1; /* One port per group */
1932 arr[n++] = 0; /* Reserved */
1933 arr[n++] = 0; /* Reserved */
1934 put_unaligned_be16(port_a, arr + n);
1936 arr[n++] = 3; /* Port unavailable */
1937 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1938 put_unaligned_be16(port_group_b, arr + n);
1940 arr[n++] = 0; /* Reserved */
1941 arr[n++] = 0; /* Status code */
1942 arr[n++] = 0; /* Vendor unique */
1943 arr[n++] = 0x1; /* One port per group */
1944 arr[n++] = 0; /* Reserved */
1945 arr[n++] = 0; /* Reserved */
1946 put_unaligned_be16(port_b, arr + n);
1950 put_unaligned_be32(rlen, arr + 0);
1953 * Return the smallest of:
1954 * - The allocated length
1955 * - The constructed response length
1956 * - The maximum array size
1958 rlen = min_t(int, alen, n);
1959 ret = fill_from_dev_buffer(scp, arr,
1960 min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
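/*
 * Worked example of the group id encoding above: host_no == 0 and
 * channel == 0 give port_group_a == 0x100 and port_group_b == 0x180,
 * so each (host, channel) pair yields a distinct pair of port groups.
 */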
1965 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1966 struct sdebug_dev_info *devip)
1969 u8 reporting_opts, req_opcode, sdeb_i, supp;
1971 u32 alloc_len, a_len;
1972 int k, offset, len, errsts, count, bump, na;
1973 const struct opcode_info_t *oip;
1974 const struct opcode_info_t *r_oip;
1976 u8 *cmd = scp->cmnd;
1978 rctd = !!(cmd[2] & 0x80);
1979 reporting_opts = cmd[2] & 0x7;
1980 req_opcode = cmd[3];
1981 req_sa = get_unaligned_be16(cmd + 4);
1982 alloc_len = get_unaligned_be32(cmd + 6);
1983 if (alloc_len < 4 || alloc_len > 0xffff) {
1984 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1985 return check_condition_result;
1987 if (alloc_len > 8192)
1991 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1993 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1995 return check_condition_result;
1997 switch (reporting_opts) {
1998 case 0: /* all commands */
1999 /* count number of commands */
2000 for (count = 0, oip = opcode_info_arr;
2001 oip->num_attached != 0xff; ++oip) {
2002 if (F_INV_OP & oip->flags)
2004 count += (oip->num_attached + 1);
2006 bump = rctd ? 20 : 8;
2007 put_unaligned_be32(count * bump, arr);
2008 for (offset = 4, oip = opcode_info_arr;
2009 oip->num_attached != 0xff && offset < a_len; ++oip) {
2010 if (F_INV_OP & oip->flags)
2012 na = oip->num_attached;
2013 arr[offset] = oip->opcode;
2014 put_unaligned_be16(oip->sa, arr + offset + 2);
2016 arr[offset + 5] |= 0x2;
2017 if (FF_SA & oip->flags)
2018 arr[offset + 5] |= 0x1;
2019 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2021 put_unaligned_be16(0xa, arr + offset + 8);
2023 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2024 if (F_INV_OP & oip->flags)
2027 arr[offset] = oip->opcode;
2028 put_unaligned_be16(oip->sa, arr + offset + 2);
2030 arr[offset + 5] |= 0x2;
2031 if (FF_SA & oip->flags)
2032 arr[offset + 5] |= 0x1;
2033 put_unaligned_be16(oip->len_mask[0],
2036 put_unaligned_be16(0xa,
2043 case 1: /* one command: opcode only */
2044 case 2: /* one command: opcode plus service action */
2045 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2046 sdeb_i = opcode_ind_arr[req_opcode];
2047 oip = &opcode_info_arr[sdeb_i];
2048 if (F_INV_OP & oip->flags) {
2052 if (1 == reporting_opts) {
2053 if (FF_SA & oip->flags) {
2054 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2057 return check_condition_result;
2060 } else if (2 == reporting_opts &&
2061 0 == (FF_SA & oip->flags)) {
2062 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2063 kfree(arr);
2064 return check_condition_result;
2066 if (0 == (FF_SA & oip->flags) &&
2067 req_opcode == oip->opcode)
2069 else if (0 == (FF_SA & oip->flags)) {
2070 na = oip->num_attached;
2071 for (k = 0, oip = oip->arrp; k < na;
2073 if (req_opcode == oip->opcode)
2076 supp = (k >= na) ? 1 : 3;
2077 } else if (req_sa != oip->sa) {
2078 na = oip->num_attached;
2079 for (k = 0, oip = oip->arrp; k < na;
2081 if (req_sa == oip->sa)
2084 supp = (k >= na) ? 1 : 3;
2088 u = oip->len_mask[0];
2089 put_unaligned_be16(u, arr + 2);
2090 arr[4] = oip->opcode;
2091 for (k = 1; k < u; ++k)
2092 arr[4 + k] = (k < 16) ?
2093 oip->len_mask[k] : 0xff;
2098 arr[1] = (rctd ? 0x80 : 0) | supp;
2100 put_unaligned_be16(0xa, arr + offset);
2105 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2107 return check_condition_result;
2109 offset = (offset < a_len) ? offset : a_len;
2110 len = (offset < alloc_len) ? offset : alloc_len;
2111 errsts = fill_from_dev_buffer(scp, arr, len);
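/*
 * In the one-command descriptor built above, the low 3 bits of byte 1
 * carry the SUPPORT field: 1 means not supported, 3 means supported in
 * conformance with the standard. With rctd set, a command timeouts
 * descriptor (length field 0xa, 12 bytes total) is appended.
 */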
2116 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2117 struct sdebug_dev_info *devip)
2122 u8 *cmd = scp->cmnd;
2124 memset(arr, 0, sizeof(arr));
2125 repd = !!(cmd[2] & 0x80);
2126 alloc_len = get_unaligned_be32(cmd + 6);
2127 if (alloc_len < 4) {
2128 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2129 return check_condition_result;
2131 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2132 arr[1] = 0x1; /* ITNRS */
2139 len = (len < alloc_len) ? len : alloc_len;
2140 return fill_from_dev_buffer(scp, arr, len);
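/*
 * Bit decode of the response above: 0xc8 in byte 0 is ATS (bit 7) |
 * ATSS (bit 6) | LURS (bit 3); 0x1 in byte 1 claims ITNRS.
 */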
2143 /* <<Following mode page info copied from ST318451LW>> */
2145 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2146 { /* Read-Write Error Recovery page for mode_sense */
2147 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2150 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2152 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2153 return sizeof(err_recov_pg);
2156 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2157 { /* Disconnect-Reconnect page for mode_sense */
2158 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2159 0, 0, 0, 0, 0, 0, 0, 0};
2161 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2163 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2164 return sizeof(disconnect_pg);
2167 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2168 { /* Format device page for mode_sense */
2169 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2170 0, 0, 0, 0, 0, 0, 0, 0,
2171 0, 0, 0, 0, 0x40, 0, 0, 0};
2173 memcpy(p, format_pg, sizeof(format_pg));
2174 put_unaligned_be16(sdebug_sectors_per, p + 10);
2175 put_unaligned_be16(sdebug_sector_size, p + 12);
2176 if (sdebug_removable)
2177 p[20] |= 0x20; /* should agree with INQUIRY */
2179 memset(p + 2, 0, sizeof(format_pg) - 2);
2180 return sizeof(format_pg);
2183 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2184 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2187 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2188 { /* Caching page for mode_sense */
2189 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2190 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2191 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2192 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2194 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2195 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2196 memcpy(p, caching_pg, sizeof(caching_pg));
2198 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2199 else if (2 == pcontrol)
2200 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2201 return sizeof(caching_pg);
2204 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2207 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2208 { /* Control mode page for mode_sense */
2209 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2211 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2215 ctrl_m_pg[2] |= 0x4;
2217 ctrl_m_pg[2] &= ~0x4;
2220 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2222 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2224 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2225 else if (2 == pcontrol)
2226 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2227 return sizeof(ctrl_m_pg);
2231 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2232 { /* Informational Exceptions control mode page for mode_sense */
2233 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2235 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2238 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2240 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2241 else if (2 == pcontrol)
2242 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2243 return sizeof(iec_m_pg);
2246 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2247 { /* SAS SSP mode page - short format for mode_sense */
2248 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2249 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2251 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2253 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2254 return sizeof(sas_sf_m_pg);
2258 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2260 { /* SAS phy control and discover mode page for mode_sense */
2261 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2262 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2263 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2264 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2265 0x2, 0, 0, 0, 0, 0, 0, 0,
2266 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2267 0, 0, 0, 0, 0, 0, 0, 0,
2268 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2269 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2270 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2271 0x3, 0, 0, 0, 0, 0, 0, 0,
2272 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2273 0, 0, 0, 0, 0, 0, 0, 0,
2277 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2278 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2279 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2280 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2281 port_a = target_dev_id + 1;
2282 port_b = port_a + 1;
2283 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2284 put_unaligned_be32(port_a, p + 20);
2285 put_unaligned_be32(port_b, p + 48 + 20);
2287 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2288 return sizeof(sas_pcd_m_pg);
2291 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2292 { /* SAS SSP shared protocol specific port mode subpage */
2293 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2294 0, 0, 0, 0, 0, 0, 0, 0,
2297 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2299 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2300 return sizeof(sas_sha_m_pg);
2303 #define SDEBUG_MAX_MSENSE_SZ 256
2305 static int resp_mode_sense(struct scsi_cmnd *scp,
2306 struct sdebug_dev_info *devip)
2308 int pcontrol, pcode, subpcode, bd_len;
2309 unsigned char dev_spec;
2310 int alloc_len, offset, len, target_dev_id;
2311 int target = scp->device->id;
2313 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2314 unsigned char *cmd = scp->cmnd;
2315 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2317 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2318 pcontrol = (cmd[2] & 0xc0) >> 6;
2319 pcode = cmd[2] & 0x3f;
2321 msense_6 = (MODE_SENSE == cmd[0]);
2322 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2323 is_disk = (sdebug_ptype == TYPE_DISK);
2324 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2325 if ((is_disk || is_zbc) && !dbd)
2326 bd_len = llbaa ? 16 : 8;
2329 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2330 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2331 if (0x3 == pcontrol) { /* Saving values not supported */
2332 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2333 return check_condition_result;
2335 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2336 (devip->target * 1000) - 3;
2337 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2338 if (is_disk || is_zbc) {
2339 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2351 arr[4] = 0x1; /* set LONGLBA bit */
2352 arr[7] = bd_len; /* assume 255 or less */
2356 if ((bd_len > 0) && (!sdebug_capacity))
2357 sdebug_capacity = get_sdebug_capacity();
2360 if (sdebug_capacity > 0xfffffffe)
2361 put_unaligned_be32(0xffffffff, ap + 0);
2363 put_unaligned_be32(sdebug_capacity, ap + 0);
2364 put_unaligned_be16(sdebug_sector_size, ap + 6);
2367 } else if (16 == bd_len) {
2368 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2369 put_unaligned_be32(sdebug_sector_size, ap + 12);
2374 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2375 /* TODO: Control Extension page */
2376 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2377 return check_condition_result;
2382 case 0x1: /* Read-Write error recovery page, direct access */
2383 len = resp_err_recov_pg(ap, pcontrol, target);
2386 case 0x2: /* Disconnect-Reconnect page, all devices */
2387 len = resp_disconnect_pg(ap, pcontrol, target);
2390 case 0x3: /* Format device page, direct access */
2392 len = resp_format_pg(ap, pcontrol, target);
2397 case 0x8: /* Caching page, direct access */
2398 if (is_disk || is_zbc) {
2399 len = resp_caching_pg(ap, pcontrol, target);
2404 case 0xa: /* Control Mode page, all devices */
2405 len = resp_ctrl_m_pg(ap, pcontrol, target);
2408 case 0x19: /* if spc==1 then sas phy, control+discover */
2409 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2410 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2411 return check_condition_result;
2414 if ((0x0 == subpcode) || (0xff == subpcode))
2415 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2416 if ((0x1 == subpcode) || (0xff == subpcode))
2417 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2419 if ((0x2 == subpcode) || (0xff == subpcode))
2420 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2423 case 0x1c: /* Informational Exceptions Mode page, all devices */
2424 len = resp_iec_m_pg(ap, pcontrol, target);
2427 case 0x3f: /* Read all Mode pages */
2428 if ((0 == subpcode) || (0xff == subpcode)) {
2429 len = resp_err_recov_pg(ap, pcontrol, target);
2430 len += resp_disconnect_pg(ap + len, pcontrol, target);
2432 len += resp_format_pg(ap + len, pcontrol,
2434 len += resp_caching_pg(ap + len, pcontrol,
2436 } else if (is_zbc) {
2437 len += resp_caching_pg(ap + len, pcontrol,
2440 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2441 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2442 if (0xff == subpcode) {
2443 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2444 target, target_dev_id);
2445 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2447 len += resp_iec_m_pg(ap + len, pcontrol, target);
2450 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2451 return check_condition_result;
2459 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2460 return check_condition_result;
2463 arr[0] = offset - 1;
2465 put_unaligned_be16((offset - 2), arr + 0);
2466 return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
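/*
 * The pcontrol field decoded above selects what each resp_*_pg() helper
 * reports: 0 current values, 1 a changeable-bits mask, 2 defaults, and
 * 3 (saved values) is rejected earlier with SAVING_PARAMS_UNSUP.
 */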
2469 #define SDEBUG_MAX_MSELECT_SZ 512
2471 static int resp_mode_select(struct scsi_cmnd *scp,
2472 struct sdebug_dev_info *devip)
2474 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2475 int param_len, res, mpage;
2476 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2477 unsigned char *cmd = scp->cmnd;
2478 int mselect6 = (MODE_SELECT == cmd[0]);
2480 memset(arr, 0, sizeof(arr));
2483 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2484 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2485 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2486 return check_condition_result;
2488 res = fetch_to_dev_buffer(scp, arr, param_len);
2490 return DID_ERROR << 16;
2491 else if (sdebug_verbose && (res < param_len))
2492 sdev_printk(KERN_INFO, scp->device,
2493 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2494 __func__, param_len, res);
2495 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2496 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2498 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2499 return check_condition_result;
2501 off = bd_len + (mselect6 ? 4 : 8);
2502 mpage = arr[off] & 0x3f;
2503 ps = !!(arr[off] & 0x80);
2505 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2506 return check_condition_result;
2508 spf = !!(arr[off] & 0x40);
2509 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2511 if ((pg_len + off) > param_len) {
2512 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2513 PARAMETER_LIST_LENGTH_ERR, 0);
2514 return check_condition_result;
2517 case 0x8: /* Caching Mode page */
2518 if (caching_pg[1] == arr[off + 1]) {
2519 memcpy(caching_pg + 2, arr + off + 2,
2520 sizeof(caching_pg) - 2);
2521 goto set_mode_changed_ua;
2524 case 0xa: /* Control Mode page */
2525 if (ctrl_m_pg[1] == arr[off + 1]) {
2526 memcpy(ctrl_m_pg + 2, arr + off + 2,
2527 sizeof(ctrl_m_pg) - 2);
2528 if (ctrl_m_pg[4] & 0x8)
2532 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2533 goto set_mode_changed_ua;
2536 case 0x1c: /* Informational Exceptions Mode page */
2537 if (iec_m_pg[1] == arr[off + 1]) {
2538 memcpy(iec_m_pg + 2, arr + off + 2,
2539 sizeof(iec_m_pg) - 2);
2540 goto set_mode_changed_ua;
2546 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2547 return check_condition_result;
2548 set_mode_changed_ua:
2549 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2553 static int resp_temp_l_pg(unsigned char *arr)
2555 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2556 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2559 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2560 return sizeof(temp_l_pg);
2563 static int resp_ie_l_pg(unsigned char *arr)
2565 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2568 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2569 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2570 arr[4] = THRESHOLD_EXCEEDED;
2573 return sizeof(ie_l_pg);
2576 #define SDEBUG_MAX_LSENSE_SZ 512
2578 static int resp_log_sense(struct scsi_cmnd *scp,
2579 struct sdebug_dev_info *devip)
2581 int ppc, sp, pcode, subpcode, alloc_len, len, n;
2582 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2583 unsigned char *cmd = scp->cmnd;
2585 memset(arr, 0, sizeof(arr));
2589 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2590 return check_condition_result;
2592 pcode = cmd[2] & 0x3f;
2593 subpcode = cmd[3] & 0xff;
2594 alloc_len = get_unaligned_be16(cmd + 7);
2596 if (0 == subpcode) {
2598 case 0x0: /* Supported log pages log page */
2600 arr[n++] = 0x0; /* this page */
2601 arr[n++] = 0xd; /* Temperature */
2602 arr[n++] = 0x2f; /* Informational exceptions */
2605 case 0xd: /* Temperature log page */
2606 arr[3] = resp_temp_l_pg(arr + 4);
2608 case 0x2f: /* Informational exceptions log page */
2609 arr[3] = resp_ie_l_pg(arr + 4);
2612 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2613 return check_condition_result;
2615 } else if (0xff == subpcode) {
2619 case 0x0: /* Supported log pages and subpages log page */
2622 arr[n++] = 0x0; /* 0,0 page */
2624 arr[n++] = 0xff; /* this page */
2626 arr[n++] = 0x0; /* Temperature */
2628 arr[n++] = 0x0; /* Informational exceptions */
2631 case 0xd: /* Temperature subpages */
2634 arr[n++] = 0x0; /* Temperature */
2637 case 0x2f: /* Informational exceptions subpages */
2640 arr[n++] = 0x0; /* Informational exceptions */
2644 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2645 return check_condition_result;
2648 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2649 return check_condition_result;
2651 len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2652 return fill_from_dev_buffer(scp, arr,
2653 min_t(int, len, SDEBUG_MAX_LSENSE_SZ));
2656 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2658 return devip->nr_zones != 0;
2661 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2662 unsigned long long lba)
2664 return &devip->zstate[lba >> devip->zsize_shift];
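/*
 * Hedged worked example: with a zone size of 0x20000 blocks,
 * zsize_shift is 17 and the lookup above maps lba 0x20001 to
 * devip->zstate[1]. A hypothetical inverse, for illustration only:
 */
#if 0
static unsigned long long zbc_zone_start(const struct sdebug_dev_info *devip,
					 unsigned int zno)
{
	return (unsigned long long)zno << devip->zsize_shift;
}
#endif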
2667 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2669 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2672 static void zbc_close_zone(struct sdebug_dev_info *devip,
2673 struct sdeb_zone_state *zsp)
2675 enum sdebug_z_cond zc;
2677 if (zbc_zone_is_conv(zsp))
2681 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2684 if (zc == ZC2_IMPLICIT_OPEN)
2685 devip->nr_imp_open--;
2687 devip->nr_exp_open--;
2689 if (zsp->z_wp == zsp->z_start) {
2690 zsp->z_cond = ZC1_EMPTY;
2692 zsp->z_cond = ZC4_CLOSED;
2697 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2699 struct sdeb_zone_state *zsp = &devip->zstate[0];
2702 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2703 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2704 zbc_close_zone(devip, zsp);
2710 static void zbc_open_zone(struct sdebug_dev_info *devip,
2711 struct sdeb_zone_state *zsp, bool explicit)
2713 enum sdebug_z_cond zc;
2715 if (zbc_zone_is_conv(zsp))
2719 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2720 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2723 /* Close an implicit open zone if necessary */
2724 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2725 zbc_close_zone(devip, zsp);
2726 else if (devip->max_open &&
2727 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2728 zbc_close_imp_open_zone(devip);
2730 if (zsp->z_cond == ZC4_CLOSED)
2733 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2734 devip->nr_exp_open++;
2736 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2737 devip->nr_imp_open++;
2741 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2742 unsigned long long lba, unsigned int num)
2744 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2745 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2747 if (zbc_zone_is_conv(zsp))
2750 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2752 if (zsp->z_wp >= zend)
2753 zsp->z_cond = ZC5_FULL;
2758 if (lba != zsp->z_wp)
2759 zsp->z_non_seq_resource = true;
2765 } else if (end > zsp->z_wp) {
2771 if (zsp->z_wp >= zend)
2772 zsp->z_cond = ZC5_FULL;
2778 zend = zsp->z_start + zsp->z_size;
2783 static int check_zbc_access_params(struct scsi_cmnd *scp,
2784 unsigned long long lba, unsigned int num, bool write)
2786 struct scsi_device *sdp = scp->device;
2787 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2788 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2789 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2792 if (devip->zmodel == BLK_ZONED_HA)
2794 /* For host-managed, reads cannot cross zone type boundaries */
2795 if (zsp_end != zsp &&
2796 zbc_zone_is_conv(zsp) &&
2797 !zbc_zone_is_conv(zsp_end)) {
2798 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2801 return check_condition_result;
2806 /* No restrictions for writes within conventional zones */
2807 if (zbc_zone_is_conv(zsp)) {
2808 if (!zbc_zone_is_conv(zsp_end)) {
2809 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2811 WRITE_BOUNDARY_ASCQ);
2812 return check_condition_result;
2817 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2818 /* Writes cannot cross sequential zone boundaries */
2819 if (zsp_end != zsp) {
2820 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2822 WRITE_BOUNDARY_ASCQ);
2823 return check_condition_result;
2825 /* Cannot write full zones */
2826 if (zsp->z_cond == ZC5_FULL) {
2827 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2828 INVALID_FIELD_IN_CDB, 0);
2829 return check_condition_result;
2831 /* Writes must be aligned to the zone WP */
2832 if (lba != zsp->z_wp) {
2833 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2835 UNALIGNED_WRITE_ASCQ);
2836 return check_condition_result;
2840 /* Handle implicit open of closed and empty zones */
2841 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2842 if (devip->max_open &&
2843 devip->nr_exp_open >= devip->max_open) {
2844 mk_sense_buffer(scp, DATA_PROTECT,
2847 return check_condition_result;
2849 zbc_open_zone(devip, zsp, false);
2855 static inline int check_device_access_params
2856 (struct scsi_cmnd *scp, unsigned long long lba,
2857 unsigned int num, bool write)
2859 struct scsi_device *sdp = scp->device;
2860 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2862 if (lba + num > sdebug_capacity) {
2863 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2864 return check_condition_result;
2866 /* transfer length excessive (tie in to block limits VPD page) */
2867 if (num > sdebug_store_sectors) {
2868 /* needs work to find which cdb byte 'num' comes from */
2869 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2870 return check_condition_result;
2872 if (write && unlikely(sdebug_wp)) {
2873 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2874 return check_condition_result;
2876 if (sdebug_dev_is_zoned(devip))
2877 return check_zbc_access_params(scp, lba, num, write);
2883 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2884 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2885 * that access any of the "stores" in struct sdeb_store_info should call this
2886 * function with bug_if_fake_rw set to true.
2888 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2889 bool bug_if_fake_rw)
2891 if (sdebug_fake_rw) {
2892 BUG_ON(bug_if_fake_rw); /* See note above */
2895 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2898 /* Returns number of bytes copied or -1 if error. */
2899 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2900 u32 sg_skip, u64 lba, u32 num, bool do_write)
2903 u64 block, rest = 0;
2904 enum dma_data_direction dir;
2905 struct scsi_data_buffer *sdb = &scp->sdb;
2909 dir = DMA_TO_DEVICE;
2910 write_since_sync = true;
2912 dir = DMA_FROM_DEVICE;
2915 if (!sdb->length || !sip)
2917 if (scp->sc_data_direction != dir)
2921 block = do_div(lba, sdebug_store_sectors);
2922 if (block + num > sdebug_store_sectors)
2923 rest = block + num - sdebug_store_sectors;
2925 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2926 fsp + (block * sdebug_sector_size),
2927 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2928 if (ret != (num - rest) * sdebug_sector_size)
2932 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2933 fsp, rest * sdebug_sector_size,
2934 sg_skip + ((num - rest) * sdebug_sector_size),
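/*
 * Worked example of the wrap-around above: with sdebug_store_sectors ==
 * 1000, lba == 990 and num == 15, block is 990 and rest is 5, so 10
 * sectors are copied at offset 990 and the remaining 5 from the start
 * of the store; the ramdisk behaves as a ring of sectors.
 */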
2941 /* Returns number of bytes copied or -1 if error. */
2942 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2944 struct scsi_data_buffer *sdb = &scp->sdb;
2948 if (scp->sc_data_direction != DMA_TO_DEVICE)
2950 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2951 num * sdebug_sector_size, 0, true);
2954 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2955 * arr into sip->storep+lba and return true. If comparison fails then return false. */
2957 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2958 const u8 *arr, bool compare_only)
2961 u64 block, rest = 0;
2962 u32 store_blks = sdebug_store_sectors;
2963 u32 lb_size = sdebug_sector_size;
2964 u8 *fsp = sip->storep;
2966 block = do_div(lba, store_blks);
2967 if (block + num > store_blks)
2968 rest = block + num - store_blks;
2970 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2974 res = memcmp(fsp, arr + ((num - rest) * lb_size),
2980 arr += num * lb_size;
2981 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2983 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2987 static __be16 dif_compute_csum(const void *buf, int len)
2992 csum = (__force __be16)ip_compute_csum(buf, len);
2994 csum = cpu_to_be16(crc_t10dif(buf, len));
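/*
 * The guard tag computed above follows sdebug_guard: a non-zero value
 * selects the IP checksum flavour, otherwise the T10 CRC (crc_t10dif)
 * is used, matching what a DIX-capable initiator would generate per
 * sector.
 */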
2999 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3000 sector_t sector, u32 ei_lba)
3002 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3004 if (sdt->guard_tag != csum) {
3005 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3006 (unsigned long)sector,
3007 be16_to_cpu(sdt->guard_tag),
3011 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3012 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3013 pr_err("REF check failed on sector %lu\n",
3014 (unsigned long)sector);
3017 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3018 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3019 pr_err("REF check failed on sector %lu\n",
3020 (unsigned long)sector);
3026 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3027 unsigned int sectors, bool read)
3031 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3032 scp->device->hostdata, true);
3033 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3034 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3035 struct sg_mapping_iter miter;
3037 /* Bytes of protection data to copy into sgl */
3038 resid = sectors * sizeof(*dif_storep);
3040 sg_miter_start(&miter, scsi_prot_sglist(scp),
3041 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3042 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3044 while (sg_miter_next(&miter) && resid > 0) {
3045 size_t len = min_t(size_t, miter.length, resid);
3046 void *start = dif_store(sip, sector);
3049 if (dif_store_end < start + len)
3050 rest = start + len - dif_store_end;
3055 memcpy(paddr, start, len - rest);
3057 memcpy(start, paddr, len - rest);
3061 memcpy(paddr + len - rest, dif_storep, rest);
3063 memcpy(dif_storep, paddr + len - rest, rest);
3066 sector += len / sizeof(*dif_storep);
3069 sg_miter_stop(&miter);
3072 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3073 unsigned int sectors, u32 ei_lba)
3077 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3078 scp->device->hostdata, true);
3079 struct t10_pi_tuple *sdt;
3081 for (i = 0; i < sectors; i++, ei_lba++) {
3084 sector = start_sec + i;
3085 sdt = dif_store(sip, sector);
3087 if (sdt->app_tag == cpu_to_be16(0xffff))
3090 ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3098 dif_copy_prot(scp, start_sec, sectors, true);
3104 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3111 struct sdeb_store_info *sip = devip2sip(devip, true);
3112 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3113 u8 *cmd = scp->cmnd;
3118 lba = get_unaligned_be64(cmd + 2);
3119 num = get_unaligned_be32(cmd + 10);
3124 lba = get_unaligned_be32(cmd + 2);
3125 num = get_unaligned_be16(cmd + 7);
3130 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3131 (u32)(cmd[1] & 0x1f) << 16;
3132 num = (0 == cmd[4]) ? 256 : cmd[4];
3137 lba = get_unaligned_be32(cmd + 2);
3138 num = get_unaligned_be32(cmd + 6);
3141 case XDWRITEREAD_10:
3143 lba = get_unaligned_be32(cmd + 2);
3144 num = get_unaligned_be16(cmd + 7);
3147 default: /* assume READ(32) */
3148 lba = get_unaligned_be64(cmd + 12);
3149 ei_lba = get_unaligned_be32(cmd + 20);
3150 num = get_unaligned_be32(cmd + 28);
3154 if (unlikely(have_dif_prot && check_prot)) {
3155 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3157 mk_sense_invalid_opcode(scp);
3158 return check_condition_result;
3160 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3161 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3162 (cmd[1] & 0xe0) == 0)
3163 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3166 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3167 atomic_read(&sdeb_inject_pending))) {
3169 atomic_set(&sdeb_inject_pending, 0);
3172 ret = check_device_access_params(scp, lba, num, false);
3175 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3176 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3177 ((lba + num) > sdebug_medium_error_start))) {
3178 /* claim unrecoverable read error */
3179 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3180 /* set info field and valid bit for fixed descriptor */
3181 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3182 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3183 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3184 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3185 put_unaligned_be32(ret, scp->sense_buffer + 3);
3187 scsi_set_resid(scp, scsi_bufflen(scp));
3188 return check_condition_result;
3191 read_lock(macc_lckp);
3194 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3195 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3198 read_unlock(macc_lckp);
3199 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3200 return illegal_condition_result;
3204 ret = do_device_access(sip, scp, 0, lba, num, false);
3205 read_unlock(macc_lckp);
3206 if (unlikely(ret == -1))
3207 return DID_ERROR << 16;
3209 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3211 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3212 atomic_read(&sdeb_inject_pending))) {
3213 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3214 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3215 atomic_set(&sdeb_inject_pending, 0);
3216 return check_condition_result;
3217 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3218 /* Logical block guard check failed */
3219 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3220 atomic_set(&sdeb_inject_pending, 0);
3221 return illegal_condition_result;
3222 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3223 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3224 atomic_set(&sdeb_inject_pending, 0);
3225 return illegal_condition_result;
3231 static void dump_sector(unsigned char *buf, int len)
3235 pr_err(">>> Sector Dump <<<\n");
3236 for (i = 0 ; i < len ; i += 16) {
3239 for (j = 0, n = 0; j < 16; j++) {
3240 unsigned char c = buf[i+j];
3242 if (c >= 0x20 && c < 0x7e)
3243 n += scnprintf(b + n, sizeof(b) - n,
3246 n += scnprintf(b + n, sizeof(b) - n,
3249 pr_err("%04d: %s\n", i, b);
3253 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3254 unsigned int sectors, u32 ei_lba)
3257 struct t10_pi_tuple *sdt;
3259 sector_t sector = start_sec;
3262 struct sg_mapping_iter diter;
3263 struct sg_mapping_iter piter;
3265 BUG_ON(scsi_sg_count(SCpnt) == 0);
3266 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3268 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3269 scsi_prot_sg_count(SCpnt),
3270 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3271 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3272 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3274 /* For each protection page */
3275 while (sg_miter_next(&piter)) {
3277 if (WARN_ON(!sg_miter_next(&diter))) {
3282 for (ppage_offset = 0; ppage_offset < piter.length;
3283 ppage_offset += sizeof(struct t10_pi_tuple)) {
3284 /* If we're at the end of the current
3285 * data page advance to the next one
3287 if (dpage_offset >= diter.length) {
3288 if (WARN_ON(!sg_miter_next(&diter))) {
3295 sdt = piter.addr + ppage_offset;
3296 daddr = diter.addr + dpage_offset;
3298 ret = dif_verify(sdt, daddr, sector, ei_lba);
3300 dump_sector(daddr, sdebug_sector_size);
3306 dpage_offset += sdebug_sector_size;
3308 diter.consumed = dpage_offset;
3309 sg_miter_stop(&diter);
3311 sg_miter_stop(&piter);
3313 dif_copy_prot(SCpnt, start_sec, sectors, false);
3320 sg_miter_stop(&diter);
3321 sg_miter_stop(&piter);
3325 static unsigned long lba_to_map_index(sector_t lba)
3327 if (sdebug_unmap_alignment)
3328 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3329 sector_div(lba, sdebug_unmap_granularity);
3333 static sector_t map_index_to_lba(unsigned long index)
3335 sector_t lba = index * sdebug_unmap_granularity;
3337 if (sdebug_unmap_alignment)
3338 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
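/*
 * Worked example for the two mappings above, assuming
 * sdebug_unmap_granularity == 4 and sdebug_unmap_alignment == 1:
 * lba_to_map_index() biases the lba by granularity - alignment == 3, so
 * lbas 0 and 1 land in map indexes 0 and 1, and map_index_to_lba(1)
 * recovers lba 1.
 */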
3342 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3346 unsigned int mapped;
3347 unsigned long index;
3350 index = lba_to_map_index(lba);
3351 mapped = test_bit(index, sip->map_storep);
3354 next = find_next_zero_bit(sip->map_storep, map_size, index);
3356 next = find_next_bit(sip->map_storep, map_size, index);
3358 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3363 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3366 sector_t end = lba + len;
3369 unsigned long index = lba_to_map_index(lba);
3371 if (index < map_size)
3372 set_bit(index, sip->map_storep);
3374 lba = map_index_to_lba(index + 1);
3378 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3381 sector_t end = lba + len;
3382 u8 *fsp = sip->storep;
3385 unsigned long index = lba_to_map_index(lba);
3387 if (lba == map_index_to_lba(index) &&
3388 lba + sdebug_unmap_granularity <= end &&
3390 clear_bit(index, sip->map_storep);
3391 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3392 memset(fsp + lba * sdebug_sector_size,
3393 (sdebug_lbprz & 1) ? 0 : 0xff,
3394 sdebug_sector_size *
3395 sdebug_unmap_granularity);
3397 if (sip->dif_storep) {
3398 memset(sip->dif_storep + lba, 0xff,
3399 sizeof(*sip->dif_storep) *
3400 sdebug_unmap_granularity);
3403 lba = map_index_to_lba(index + 1);
3407 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3414 struct sdeb_store_info *sip = devip2sip(devip, true);
3415 rwlock_t *macc_lckp = &sip->macc_lck;
3416 u8 *cmd = scp->cmnd;
3421 lba = get_unaligned_be64(cmd + 2);
3422 num = get_unaligned_be32(cmd + 10);
3427 lba = get_unaligned_be32(cmd + 2);
3428 num = get_unaligned_be16(cmd + 7);
3433 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3434 (u32)(cmd[1] & 0x1f) << 16;
3435 num = (0 == cmd[4]) ? 256 : cmd[4];
3440 lba = get_unaligned_be32(cmd + 2);
3441 num = get_unaligned_be32(cmd + 6);
3444 case 0x53: /* XDWRITEREAD(10) */
3446 lba = get_unaligned_be32(cmd + 2);
3447 num = get_unaligned_be16(cmd + 7);
3450 default: /* assume WRITE(32) */
3451 lba = get_unaligned_be64(cmd + 12);
3452 ei_lba = get_unaligned_be32(cmd + 20);
3453 num = get_unaligned_be32(cmd + 28);
3457 if (unlikely(have_dif_prot && check_prot)) {
3458 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3460 mk_sense_invalid_opcode(scp);
3461 return check_condition_result;
3463 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3464 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3465 (cmd[1] & 0xe0) == 0)
3466 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3470 write_lock(macc_lckp);
3471 ret = check_device_access_params(scp, lba, num, true);
3473 write_unlock(macc_lckp);
3478 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3479 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3482 write_unlock(macc_lckp);
3483 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3484 return illegal_condition_result;
3488 ret = do_device_access(sip, scp, 0, lba, num, true);
3489 if (unlikely(scsi_debug_lbp()))
3490 map_region(sip, lba, num);
3491 /* If ZBC zone then bump its write pointer */
3492 if (sdebug_dev_is_zoned(devip))
3493 zbc_inc_wp(devip, lba, num);
3494 write_unlock(macc_lckp);
3495 if (unlikely(-1 == ret))
3496 return DID_ERROR << 16;
3497 else if (unlikely(sdebug_verbose &&
3498 (ret < (num * sdebug_sector_size))))
3499 sdev_printk(KERN_INFO, scp->device,
3500 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3501 my_name, num * sdebug_sector_size, ret);
3503 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3504 atomic_read(&sdeb_inject_pending))) {
3505 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3506 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3507 atomic_set(&sdeb_inject_pending, 0);
3508 return check_condition_result;
3509 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3510 /* Logical block guard check failed */
3511 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3512 atomic_set(&sdeb_inject_pending, 0);
3513 return illegal_condition_result;
3514 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3515 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3516 atomic_set(&sdeb_inject_pending, 0);
3517 return illegal_condition_result;
3524 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3525 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3527 static int resp_write_scat(struct scsi_cmnd *scp,
3528 struct sdebug_dev_info *devip)
3530 u8 *cmd = scp->cmnd;
3533 struct sdeb_store_info *sip = devip2sip(devip, true);
3534 rwlock_t *macc_lckp = &sip->macc_lck;
3536 u16 lbdof, num_lrd, k;
3537 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3538 u32 lb_size = sdebug_sector_size;
3543 static const u32 lrd_size = 32; /* + parameter list header size */
3545 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3547 wrprotect = (cmd[10] >> 5) & 0x7;
3548 lbdof = get_unaligned_be16(cmd + 12);
3549 num_lrd = get_unaligned_be16(cmd + 16);
3550 bt_len = get_unaligned_be32(cmd + 28);
3551 } else { /* that leaves WRITE SCATTERED(16) */
3553 wrprotect = (cmd[2] >> 5) & 0x7;
3554 lbdof = get_unaligned_be16(cmd + 4);
3555 num_lrd = get_unaligned_be16(cmd + 8);
3556 bt_len = get_unaligned_be32(cmd + 10);
3557 if (unlikely(have_dif_prot)) {
3558 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3560 mk_sense_invalid_opcode(scp);
3561 return illegal_condition_result;
3563 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3564 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3566 sdev_printk(KERN_ERR, scp->device,
3567 "Unprotected WR to DIF device\n");
3570 if ((num_lrd == 0) || (bt_len == 0))
3571 return 0; /* T10 says these do-nothings are not errors */
3574 sdev_printk(KERN_INFO, scp->device,
3575 "%s: %s: LB Data Offset field bad\n",
3577 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3578 return illegal_condition_result;
3580 lbdof_blen = lbdof * lb_size;
3581 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3583 sdev_printk(KERN_INFO, scp->device,
3584 "%s: %s: LBA range descriptors don't fit\n",
3586 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3587 return illegal_condition_result;
3589 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3591 return SCSI_MLQUEUE_HOST_BUSY;
3593 sdev_printk(KERN_INFO, scp->device,
3594 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3595 my_name, __func__, lbdof_blen);
3596 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3598 ret = DID_ERROR << 16;
3602 write_lock(macc_lckp);
3603 sg_off = lbdof_blen;
3604 /* Spec says Buffer xfer Length field in number of LBs in dout */
3606 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3607 lba = get_unaligned_be64(up + 0);
3608 num = get_unaligned_be32(up + 8);
3610 sdev_printk(KERN_INFO, scp->device,
3611 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3612 my_name, __func__, k, lba, num, sg_off);
3615 ret = check_device_access_params(scp, lba, num, true);
3617 goto err_out_unlock;
3618 num_by = num * lb_size;
3619 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3621 if ((cum_lb + num) > bt_len) {
3623 sdev_printk(KERN_INFO, scp->device,
3624 "%s: %s: sum of blocks > data provided\n",
3626 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3628 ret = illegal_condition_result;
3629 goto err_out_unlock;
3633 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3634 int prot_ret = prot_verify_write(scp, lba, num,
3638 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3640 ret = illegal_condition_result;
3641 goto err_out_unlock;
3645 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3646 /* If ZBC zone then bump its write pointer */
3647 if (sdebug_dev_is_zoned(devip))
3648 zbc_inc_wp(devip, lba, num);
3649 if (unlikely(scsi_debug_lbp()))
3650 map_region(sip, lba, num);
3651 if (unlikely(-1 == ret)) {
3652 ret = DID_ERROR << 16;
3653 goto err_out_unlock;
3654 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3655 sdev_printk(KERN_INFO, scp->device,
3656 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3657 my_name, num_by, ret);
3659 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3660 atomic_read(&sdeb_inject_pending))) {
3661 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3662 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3663 atomic_set(&sdeb_inject_pending, 0);
3664 ret = check_condition_result;
3665 goto err_out_unlock;
3666 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3667 /* Logical block guard check failed */
3668 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3669 atomic_set(&sdeb_inject_pending, 0);
3670 ret = illegal_condition_result;
3671 goto err_out_unlock;
3672 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3673 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3674 atomic_set(&sdeb_inject_pending, 0);
3675 ret = illegal_condition_result;
3676 goto err_out_unlock;
3684 write_unlock(macc_lckp);
3690 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3691 u32 ei_lba, bool unmap, bool ndob)
3693 struct scsi_device *sdp = scp->device;
3694 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3695 unsigned long long i;
3697 u32 lb_size = sdebug_sector_size;
3699 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3700 scp->device->hostdata, true);
3701 rwlock_t *macc_lckp = &sip->macc_lck;
3705 write_lock(macc_lckp);
3707 ret = check_device_access_params(scp, lba, num, true);
3709 write_unlock(macc_lckp);
3713 if (unmap && scsi_debug_lbp()) {
3714 unmap_region(sip, lba, num);
3718 block = do_div(lbaa, sdebug_store_sectors);
3719 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3721 fs1p = fsp + (block * lb_size);
3723 memset(fs1p, 0, lb_size);
3726 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3729 write_unlock(&sip->macc_lck);
3730 return DID_ERROR << 16;
3731 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3732 sdev_printk(KERN_INFO, scp->device,
3733 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3734 my_name, "write same", lb_size, ret);
3736 /* Copy first sector to remaining blocks */
3737 for (i = 1 ; i < num ; i++) {
3739 block = do_div(lbaa, sdebug_store_sectors);
3740 memmove(fsp + (block * lb_size), fs1p, lb_size);
3742 if (scsi_debug_lbp())
3743 map_region(sip, lba, num);
3744 /* If ZBC zone then bump its write pointer */
3745 if (sdebug_dev_is_zoned(devip))
3746 zbc_inc_wp(devip, lba, num);
3748 write_unlock(macc_lckp);
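/*
 * Note on the loop above: WRITE SAME materialises one logical block
 * (zeroed for NDOB, otherwise fetched from the data-out buffer) and
 * replicates it with memmove(), re-doing the do_div() store wrap for
 * every copied block.
 */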
3753 static int resp_write_same_10(struct scsi_cmnd *scp,
3754 struct sdebug_dev_info *devip)
3756 u8 *cmd = scp->cmnd;
3763 if (sdebug_lbpws10 == 0) {
3764 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3765 return check_condition_result;
3769 lba = get_unaligned_be32(cmd + 2);
3770 num = get_unaligned_be16(cmd + 7);
3771 if (num > sdebug_write_same_length) {
3772 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3773 return check_condition_result;
3775 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3778 static int resp_write_same_16(struct scsi_cmnd *scp,
3779 struct sdebug_dev_info *devip)
3781 u8 *cmd = scp->cmnd;
3788 if (cmd[1] & 0x8) { /* UNMAP */
3789 if (sdebug_lbpws == 0) {
3790 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3791 return check_condition_result;
3795 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3797 lba = get_unaligned_be64(cmd + 2);
3798 num = get_unaligned_be32(cmd + 10);
3799 if (num > sdebug_write_same_length) {
3800 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3801 return check_condition_result;
3803 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3806 /* Note the mode field is in the same position as the (lower) service action
3807 * field. For the Report supported operation codes command, SPC-4 suggests
3808 * each mode of this command should be reported separately; that is left for the future. */
3809 static int resp_write_buffer(struct scsi_cmnd *scp,
3810 struct sdebug_dev_info *devip)
3812 u8 *cmd = scp->cmnd;
3813 struct scsi_device *sdp = scp->device;
3814 struct sdebug_dev_info *dp;
3817 mode = cmd[1] & 0x1f;
3819 case 0x4: /* download microcode (MC) and activate (ACT) */
3820 /* set UAs on this device only */
3821 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3822 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3824 case 0x5: /* download MC, save and ACT */
3825 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3827 case 0x6: /* download MC with offsets and ACT */
3828 /* set UAs on most devices (LUs) in this target */
3829 list_for_each_entry(dp,
3830 &devip->sdbg_host->dev_info_list,
3832 if (dp->target == sdp->id) {
3833 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3835 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3839 case 0x7: /* download MC with offsets, save, and ACT */
3840 /* set UA on all devices (LUs) in this target */
3841 list_for_each_entry(dp,
3842 &devip->sdbg_host->dev_info_list,
3844 if (dp->target == sdp->id)
3845 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3849 /* do nothing for this command for other mode values */
3855 static int resp_comp_write(struct scsi_cmnd *scp,
3856 struct sdebug_dev_info *devip)
3858 u8 *cmd = scp->cmnd;
3860 struct sdeb_store_info *sip = devip2sip(devip, true);
3861 rwlock_t *macc_lckp = &sip->macc_lck;
3864 u32 lb_size = sdebug_sector_size;
3869 lba = get_unaligned_be64(cmd + 2);
3870 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3872 return 0; /* degenerate case, not an error */
3873 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3875 mk_sense_invalid_opcode(scp);
3876 return check_condition_result;
3878 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3879 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3880 (cmd[1] & 0xe0) == 0)
3881 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
 "to DIF device\n");
3883 ret = check_device_access_params(scp, lba, num, false);
3887 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3889 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3891 return check_condition_result;
3894 write_lock(macc_lckp);
3896 ret = do_dout_fetch(scp, dnum, arr);
3898 retval = DID_ERROR << 16;
3900 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3901 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3902 "indicated=%u, IO sent=%d bytes\n", my_name,
3903 dnum * lb_size, ret);
3904 if (!comp_write_worker(sip, lba, num, arr, false)) {
3905 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3906 retval = check_condition_result;
3909 if (scsi_debug_lbp())
3910 map_region(sip, lba, num);
3912 write_unlock(macc_lckp);
3917 struct unmap_block_desc {
3923 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3926 struct unmap_block_desc *desc;
3927 struct sdeb_store_info *sip = devip2sip(devip, true);
3928 rwlock_t *macc_lckp = &sip->macc_lck;
3929 unsigned int i, payload_len, descriptors;
3932 if (!scsi_debug_lbp())
3933 return 0; /* fib and say it's done */
3934 payload_len = get_unaligned_be16(scp->cmnd + 7);
3935 BUG_ON(scsi_bufflen(scp) != payload_len);
3937 descriptors = (payload_len - 8) / 16;
3938 if (descriptors > sdebug_unmap_max_desc) {
3939 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3940 return check_condition_result;
3943 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3945 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3947 return check_condition_result;
3950 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3952 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3953 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3955 desc = (void *)&buf[8];
3957 write_lock(macc_lckp);
3959 for (i = 0 ; i < descriptors ; i++) {
3960 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3961 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3963 ret = check_device_access_params(scp, lba, num, true);
3967 unmap_region(sip, lba, num);
3973 write_unlock(macc_lckp);
3979 #define SDEBUG_GET_LBA_STATUS_LEN 32
3981 static int resp_get_lba_status(struct scsi_cmnd *scp,
3982 struct sdebug_dev_info *devip)
3984 u8 *cmd = scp->cmnd;
3986 u32 alloc_len, mapped, num;
3988 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3990 lba = get_unaligned_be64(cmd + 2);
3991 alloc_len = get_unaligned_be32(cmd + 10);
3996 ret = check_device_access_params(scp, lba, 1, false);
4000 if (scsi_debug_lbp()) {
4001 struct sdeb_store_info *sip = devip2sip(devip, true);
4003 mapped = map_state(sip, lba, &num);
4006 /* following just in case virtual_gb changed */
4007 sdebug_capacity = get_sdebug_capacity();
4008 if (sdebug_capacity - lba <= 0xffffffff)
4009 num = sdebug_capacity - lba;
4014 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4015 put_unaligned_be32(20, arr); /* Parameter Data Length */
4016 put_unaligned_be64(lba, arr + 8); /* LBA */
4017 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4018 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4020 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4023 static int resp_sync_cache(struct scsi_cmnd *scp,
4024 struct sdebug_dev_info *devip)
4029 u8 *cmd = scp->cmnd;
4031 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4032 lba = get_unaligned_be32(cmd + 2);
4033 num_blocks = get_unaligned_be16(cmd + 7);
4034 } else { /* SYNCHRONIZE_CACHE(16) */
4035 lba = get_unaligned_be64(cmd + 2);
4036 num_blocks = get_unaligned_be32(cmd + 10);
4038 if (lba + num_blocks > sdebug_capacity) {
4039 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4040 return check_condition_result;
4042 if (!write_since_sync || (cmd[1] & 0x2))
4043 res = SDEG_RES_IMMED_MASK;
4044 else /* delay if write_since_sync and IMMED clear */
4045 write_since_sync = false;
4050  * Assuming the LBA+num_blocks is not out-of-range, this function returns
4051  * CONDITION MET if the specified blocks will fit (or already have been
4052  * fitted) in the cache, and GOOD status otherwise. This driver models a
4053  * disk with a big cache so it always yields CONDITION MET. It also tries
4054  * to bring the range in main memory into the cache associated with the CPU(s).
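 *
 * Worked example (hypothetical numbers): with sdebug_store_sectors at
 * 0x1000, an lba of 0xff0 and nblks of 0x20 give block = 0xff0 and
 * rest = 0x10, so the last 0x10 sectors of the store are prefetched
 * followed by the first 0x10 sectors (the range wraps around).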
4056 static int resp_pre_fetch(struct scsi_cmnd *scp,
4057 struct sdebug_dev_info *devip)
4061 u64 block, rest = 0;
4063 u8 *cmd = scp->cmnd;
4064 struct sdeb_store_info *sip = devip2sip(devip, true);
4065 rwlock_t *macc_lckp = &sip->macc_lck;
4066 u8 *fsp = sip->storep;
4068 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4069 lba = get_unaligned_be32(cmd + 2);
4070 nblks = get_unaligned_be16(cmd + 7);
4071 } else { /* PRE-FETCH(16) */
4072 lba = get_unaligned_be64(cmd + 2);
4073 nblks = get_unaligned_be32(cmd + 10);
4075 if (lba + nblks > sdebug_capacity) {
4076 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4077 return check_condition_result;
4081 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4082 block = do_div(lba, sdebug_store_sectors);
4083 if (block + nblks > sdebug_store_sectors)
4084 rest = block + nblks - sdebug_store_sectors;
4086 /* Try to bring the PRE-FETCH range into CPU's cache */
4087 read_lock(macc_lckp);
4088 prefetch_range(fsp + (sdebug_sector_size * block),
4089 (nblks - rest) * sdebug_sector_size);
4091 prefetch_range(fsp, rest * sdebug_sector_size);
4092 read_unlock(macc_lckp);
4095 res = SDEG_RES_IMMED_MASK;
4096 return res | condition_met_result;
4099 #define RL_BUCKET_ELEMS 8
4101 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4102 * (W-LUN), the normal Linux scanning logic does not associate it with a
4103 * device (e.g. /dev/sg7). The following magic will make that association:
4104 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4105 * where <n> is a host number. If there are multiple targets in a host then
4106 * the above will associate a W-LUN to each target. To only get a W-LUN
4107 * for target 2, then use "echo '- 2 49409' > scan" .
4109 static int resp_report_luns(struct scsi_cmnd *scp,
4110 struct sdebug_dev_info *devip)
4112 unsigned char *cmd = scp->cmnd;
4113 unsigned int alloc_len;
4114 unsigned char select_report;
4116 struct scsi_lun *lun_p;
4117 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4118 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4119 unsigned int wlun_cnt; /* report luns W-LUN count */
4120 unsigned int tlun_cnt; /* total LUN count */
4121 unsigned int rlen; /* response length (in bytes) */
4123 unsigned int off_rsp = 0;
4124 const int sz_lun = sizeof(struct scsi_lun);
4126 clear_luns_changed_on_target(devip);
4128 select_report = cmd[2];
4129 alloc_len = get_unaligned_be32(cmd + 6);
4131 if (alloc_len < 4) {
4132 pr_err("alloc len too small %d\n", alloc_len);
4133 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4134 return check_condition_result;
4137 switch (select_report) {
4138 case 0: /* all LUNs apart from W-LUNs */
4139 lun_cnt = sdebug_max_luns;
4142 case 1: /* only W-LUNs */
4146 case 2: /* all LUNs */
4147 lun_cnt = sdebug_max_luns;
4150 case 0x10: /* only administrative LUs */
4151 case 0x11: /* see SPC-5 */
4152 case 0x12: /* only subsidiary LUs owned by referenced LU */
4154 pr_debug("select report invalid %d\n", select_report);
4155 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4156 return check_condition_result;
4159 if (sdebug_no_lun_0 && (lun_cnt > 0))
4162 tlun_cnt = lun_cnt + wlun_cnt;
4163 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4164 scsi_set_resid(scp, scsi_bufflen(scp));
4165 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4166 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4168 /* loops rely on the response header size matching sizeof(struct scsi_lun) (both 8) */
4169 lun = sdebug_no_lun_0 ? 1 : 0;
4170 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4171 memset(arr, 0, sizeof(arr));
4172 lun_p = (struct scsi_lun *)&arr[0];
4174 put_unaligned_be32(rlen, &arr[0]);
4178 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4179 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4181 int_to_scsilun(lun++, lun_p);
4183 if (j < RL_BUCKET_ELEMS)
4186 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4192 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4196 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4200 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4202 bool is_bytchk3 = false;
4205 u32 vnum, a_num, off;
4206 const u32 lb_size = sdebug_sector_size;
4209 u8 *cmd = scp->cmnd;
4210 struct sdeb_store_info *sip = devip2sip(devip, true);
4211 rwlock_t *macc_lckp = &sip->macc_lck;
4213 bytchk = (cmd[1] >> 1) & 0x3;
4215 return 0; /* always claim internal verify okay */
4216 } else if (bytchk == 2) {
4217 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4218 return check_condition_result;
4219 } else if (bytchk == 3) {
4220 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4224 lba = get_unaligned_be64(cmd + 2);
4225 vnum = get_unaligned_be32(cmd + 10);
4227 case VERIFY: /* is VERIFY(10) */
4228 lba = get_unaligned_be32(cmd + 2);
4229 vnum = get_unaligned_be16(cmd + 7);
4232 mk_sense_invalid_opcode(scp);
4233 return check_condition_result;
4235 a_num = is_bytchk3 ? 1 : vnum;
4236 /* Treat following check like one for read (i.e. no write) access */
4237 ret = check_device_access_params(scp, lba, a_num, false);
4241 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4243 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4245 return check_condition_result;
4247 /* Not changing store, so only need read access */
4248 read_lock(macc_lckp);
4250 ret = do_dout_fetch(scp, a_num, arr);
4252 ret = DID_ERROR << 16;
4254 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4255 sdev_printk(KERN_INFO, scp->device,
4256 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4257 my_name, __func__, a_num * lb_size, ret);
4260 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4261 memcpy(arr + off, arr, lb_size);
4264 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4265 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4266 ret = check_condition_result;
4270 read_unlock(macc_lckp);
4275 #define RZONES_DESC_HD 64
4277 /* Report zones depending on start LBA and reporting options */
4278 static int resp_report_zones(struct scsi_cmnd *scp,
4279 struct sdebug_dev_info *devip)
4281 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4283 u32 alloc_len, rep_opts, rep_len;
4286 u8 *arr = NULL, *desc;
4287 u8 *cmd = scp->cmnd;
4288 struct sdeb_zone_state *zsp;
4289 struct sdeb_store_info *sip = devip2sip(devip, false);
4290 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4292 if (!sdebug_dev_is_zoned(devip)) {
4293 mk_sense_invalid_opcode(scp);
4294 return check_condition_result;
4296 zs_lba = get_unaligned_be64(cmd + 2);
4297 alloc_len = get_unaligned_be32(cmd + 10);
4298 rep_opts = cmd[14] & 0x3f;
4299 partial = cmd[14] & 0x80;
4301 if (zs_lba >= sdebug_capacity) {
4302 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4303 return check_condition_result;
4306 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4307 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4310 arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4312 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4314 return check_condition_result;
4317 read_lock(macc_lckp);
4320 for (i = 0; i < max_zones; i++) {
4321 lba = zs_lba + devip->zsize * i;
4322 if (lba > sdebug_capacity)
4324 zsp = zbc_zone(devip, lba);
4331 if (zsp->z_cond != ZC1_EMPTY)
4335 /* Implicit open zones */
4336 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4340 /* Explicit open zones */
4341 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4346 if (zsp->z_cond != ZC4_CLOSED)
4351 if (zsp->z_cond != ZC5_FULL)
4358 * Read-only, offline, reset WP recommended are
4359 * not emulated: no zones to report;
4363 /* non-seq-resource set */
4364 if (!zsp->z_non_seq_resource)
4368 /* Not write pointer (conventional) zones */
4369 if (!zbc_zone_is_conv(zsp))
4373 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4374 INVALID_FIELD_IN_CDB, 0);
4375 ret = check_condition_result;
4379 if (nrz < rep_max_zones) {
4380 /* Fill zone descriptor */
4381 desc[0] = zsp->z_type;
4382 desc[1] = zsp->z_cond << 4;
4383 if (zsp->z_non_seq_resource)
4385 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4386 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4387 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4391 if (partial && nrz >= rep_max_zones)
4398 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4399 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4401 rep_len = (unsigned long)desc - (unsigned long)arr;
4402 ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4405 read_unlock(macc_lckp);
4410 /* Logic transplanted from tcmu-runner, file_zbc.c */
4411 static void zbc_open_all(struct sdebug_dev_info *devip)
4413 struct sdeb_zone_state *zsp = &devip->zstate[0];
4416 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4417 if (zsp->z_cond == ZC4_CLOSED)
4418 zbc_open_zone(devip, &devip->zstate[i], true);
4422 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4426 enum sdebug_z_cond zc;
4427 u8 *cmd = scp->cmnd;
4428 struct sdeb_zone_state *zsp;
4429 bool all = cmd[14] & 0x01;
4430 struct sdeb_store_info *sip = devip2sip(devip, false);
4431 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4433 if (!sdebug_dev_is_zoned(devip)) {
4434 mk_sense_invalid_opcode(scp);
4435 return check_condition_result;
4438 write_lock(macc_lckp);
4441 /* Check if all closed zones can be opened */
4442 if (devip->max_open &&
4443 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4444 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4446 res = check_condition_result;
4449 /* Open all closed zones */
4450 zbc_open_all(devip);
4454 /* Open the specified zone */
4455 z_id = get_unaligned_be64(cmd + 2);
4456 if (z_id >= sdebug_capacity) {
4457 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4458 res = check_condition_result;
4462 zsp = zbc_zone(devip, z_id);
4463 if (z_id != zsp->z_start) {
4464 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4465 res = check_condition_result;
4468 if (zbc_zone_is_conv(zsp)) {
4469 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4470 res = check_condition_result;
4475 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4478 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4479 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4481 res = check_condition_result;
4485 if (zc == ZC2_IMPLICIT_OPEN)
4486 zbc_close_zone(devip, zsp);
4487 zbc_open_zone(devip, zsp, true);
4489 write_unlock(macc_lckp);
4493 static void zbc_close_all(struct sdebug_dev_info *devip)
4497 for (i = 0; i < devip->nr_zones; i++)
4498 zbc_close_zone(devip, &devip->zstate[i]);
4501 static int resp_close_zone(struct scsi_cmnd *scp,
4502 struct sdebug_dev_info *devip)
4506 u8 *cmd = scp->cmnd;
4507 struct sdeb_zone_state *zsp;
4508 bool all = cmd[14] & 0x01;
4509 struct sdeb_store_info *sip = devip2sip(devip, false);
4510 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4512 if (!sdebug_dev_is_zoned(devip)) {
4513 mk_sense_invalid_opcode(scp);
4514 return check_condition_result;
4517 write_lock(macc_lckp);
4520 zbc_close_all(devip);
4524 /* Close specified zone */
4525 z_id = get_unaligned_be64(cmd + 2);
4526 if (z_id >= sdebug_capacity) {
4527 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4528 res = check_condition_result;
4532 zsp = zbc_zone(devip, z_id);
4533 if (z_id != zsp->z_start) {
4534 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4535 res = check_condition_result;
4538 if (zbc_zone_is_conv(zsp)) {
4539 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4540 res = check_condition_result;
4544 zbc_close_zone(devip, zsp);
4546 write_unlock(macc_lckp);
4550 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4551 struct sdeb_zone_state *zsp, bool empty)
4553 enum sdebug_z_cond zc = zsp->z_cond;
4555 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4556 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4557 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4558 zbc_close_zone(devip, zsp);
4559 if (zsp->z_cond == ZC4_CLOSED)
4561 zsp->z_wp = zsp->z_start + zsp->z_size;
4562 zsp->z_cond = ZC5_FULL;
4566 static void zbc_finish_all(struct sdebug_dev_info *devip)
4570 for (i = 0; i < devip->nr_zones; i++)
4571 zbc_finish_zone(devip, &devip->zstate[i], false);
4574 static int resp_finish_zone(struct scsi_cmnd *scp,
4575 struct sdebug_dev_info *devip)
4577 struct sdeb_zone_state *zsp;
4580 u8 *cmd = scp->cmnd;
4581 bool all = cmd[14] & 0x01;
4582 struct sdeb_store_info *sip = devip2sip(devip, false);
4583 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4585 if (!sdebug_dev_is_zoned(devip)) {
4586 mk_sense_invalid_opcode(scp);
4587 return check_condition_result;
4590 write_lock(macc_lckp);
4593 zbc_finish_all(devip);
4597 /* Finish the specified zone */
4598 z_id = get_unaligned_be64(cmd + 2);
4599 if (z_id >= sdebug_capacity) {
4600 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4601 res = check_condition_result;
4605 zsp = zbc_zone(devip, z_id);
4606 if (z_id != zsp->z_start) {
4607 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4608 res = check_condition_result;
4611 if (zbc_zone_is_conv(zsp)) {
4612 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4613 res = check_condition_result;
4617 zbc_finish_zone(devip, zsp, true);
4619 write_unlock(macc_lckp);
4623 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4624 struct sdeb_zone_state *zsp)
4626 enum sdebug_z_cond zc;
4628 if (zbc_zone_is_conv(zsp))
4632 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4633 zbc_close_zone(devip, zsp);
4635 if (zsp->z_cond == ZC4_CLOSED)
4638 zsp->z_non_seq_resource = false;
4639 zsp->z_wp = zsp->z_start;
4640 zsp->z_cond = ZC1_EMPTY;
4643 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4647 for (i = 0; i < devip->nr_zones; i++)
4648 zbc_rwp_zone(devip, &devip->zstate[i]);
4651 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4653 struct sdeb_zone_state *zsp;
4656 u8 *cmd = scp->cmnd;
4657 bool all = cmd[14] & 0x01;
4658 struct sdeb_store_info *sip = devip2sip(devip, false);
4659 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4661 if (!sdebug_dev_is_zoned(devip)) {
4662 mk_sense_invalid_opcode(scp);
4663 return check_condition_result;
4666 write_lock(macc_lckp);
4673 z_id = get_unaligned_be64(cmd + 2);
4674 if (z_id >= sdebug_capacity) {
4675 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4676 res = check_condition_result;
4680 zsp = zbc_zone(devip, z_id);
4681 if (z_id != zsp->z_start) {
4682 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4683 res = check_condition_result;
4686 if (zbc_zone_is_conv(zsp)) {
4687 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4688 res = check_condition_result;
4692 zbc_rwp_zone(devip, zsp);
4694 write_unlock(macc_lckp);
4698 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4702 if (sdebug_host_max_queue) {
4703 /* Provide a simple method to choose the hwq */
4704 hwq = smp_processor_id() % submit_queues;
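/* e.g. with submit_queues == 4, a caller running on CPU 6 maps to hwq 2 */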
4706 u32 tag = blk_mq_unique_tag(cmnd->request);
4708 hwq = blk_mq_unique_tag_to_hwq(tag);
4710 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4711 if (WARN_ON_ONCE(hwq >= submit_queues))
4714 return sdebug_q_arr + hwq;
4717 static u32 get_tag(struct scsi_cmnd *cmnd)
4719 return blk_mq_unique_tag(cmnd->request);
4722 /* Queued (deferred) command completions converge here. */
4723 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4725 bool aborted = sd_dp->aborted;
4728 unsigned long iflags;
4729 struct sdebug_queue *sqp;
4730 struct sdebug_queued_cmd *sqcp;
4731 struct scsi_cmnd *scp;
4732 struct sdebug_dev_info *devip;
4734 sd_dp->defer_t = SDEB_DEFER_NONE;
4735 if (unlikely(aborted))
4736 sd_dp->aborted = false;
4737 qc_idx = sd_dp->qc_idx;
4738 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4739 if (sdebug_statistics) {
4740 atomic_inc(&sdebug_completions);
4741 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4742 atomic_inc(&sdebug_miss_cpus);
4744 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4745 pr_err("wild qc_idx=%d\n", qc_idx);
4748 spin_lock_irqsave(&sqp->qc_lock, iflags);
4749 sqcp = &sqp->qc_arr[qc_idx];
4751 if (unlikely(scp == NULL)) {
4752 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4753 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4754 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4757 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4759 atomic_dec(&devip->num_in_q);
4761 pr_err("devip=NULL\n");
4762 if (unlikely(atomic_read(&retired_max_queue) > 0))
4765 sqcp->a_cmnd = NULL;
4766 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4767 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4768 pr_err("Unexpected completion\n");
4772 if (unlikely(retiring)) { /* user has reduced max_queue */
4775 retval = atomic_read(&retired_max_queue);
4776 if (qc_idx >= retval) {
4777 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4778 pr_err("index %d too large\n", retval);
4781 k = find_last_bit(sqp->in_use_bm, retval);
4782 if ((k < sdebug_max_queue) || (k == retval))
4783 atomic_set(&retired_max_queue, 0);
4785 atomic_set(&retired_max_queue, k + 1);
4787 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4788 if (unlikely(aborted)) {
4790 pr_info("bypassing scsi_done() due to aborted cmd\n");
4793 scp->scsi_done(scp); /* callback to mid level */
4796 /* When high resolution timer goes off this function is called. */
4797 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4799 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4801 sdebug_q_cmd_complete(sd_dp);
4802 return HRTIMER_NORESTART;
4805 /* When work queue schedules work, it calls this function. */
4806 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4808 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4810 sdebug_q_cmd_complete(sd_dp);
4813 static bool got_shared_uuid;
4814 static uuid_t shared_uuid;
4816 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4818 struct sdeb_zone_state *zsp;
4819 sector_t capacity = get_sdebug_capacity();
4820 sector_t zstart = 0;
4824 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4825 * a zone size allowing for at least 4 zones on the device. Otherwise,
4826  * use the specified zone size, checking that at least 2 zones can be
4827 * created for the device.
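 *
 * Worked example (assuming DEF_ZBC_ZONE_SIZE_MB is 128 and 512 byte
 * sectors): a 64 MiB store has a capacity of 0x20000 sectors while the
 * default zone size starts at 0x40000 sectors; halving until at least
 * 4 zones fit yields zsize = 0x8000 sectors (16 MiB), i.e. 4 zones.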
4829 if (!sdeb_zbc_zone_size_mb) {
4830 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4831 >> ilog2(sdebug_sector_size);
4832 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4834 if (devip->zsize < 2) {
4835 pr_err("Device capacity too small\n");
4839 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4840 pr_err("Zone size is not a power of 2\n");
4843 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4844 >> ilog2(sdebug_sector_size);
4845 if (devip->zsize >= capacity) {
4846 pr_err("Zone size too large for device capacity\n");
4851 devip->zsize_shift = ilog2(devip->zsize);
4852 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4854 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4855 pr_err("Number of conventional zones too large\n");
4858 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4860 if (devip->zmodel == BLK_ZONED_HM) {
4861 /* zbc_max_open_zones can be 0, meaning "not reported" */
4862 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4863 devip->max_open = (devip->nr_zones - 1) / 2;
4865 devip->max_open = sdeb_zbc_max_open;
4868 devip->zstate = kcalloc(devip->nr_zones,
4869 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4873 for (i = 0; i < devip->nr_zones; i++) {
4874 zsp = &devip->zstate[i];
4876 zsp->z_start = zstart;
4878 if (i < devip->nr_conv_zones) {
4879 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4880 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4881 zsp->z_wp = (sector_t)-1;
4883 if (devip->zmodel == BLK_ZONED_HM)
4884 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4886 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4887 zsp->z_cond = ZC1_EMPTY;
4888 zsp->z_wp = zsp->z_start;
4891 if (zsp->z_start + devip->zsize < capacity)
4892 zsp->z_size = devip->zsize;
4894 zsp->z_size = capacity - zsp->z_start;
4896 zstart += zsp->z_size;
4902 static struct sdebug_dev_info *sdebug_device_create(
4903 struct sdebug_host_info *sdbg_host, gfp_t flags)
4905 struct sdebug_dev_info *devip;
4907 devip = kzalloc(sizeof(*devip), flags);
4909 if (sdebug_uuid_ctl == 1)
4910 uuid_gen(&devip->lu_name);
4911 else if (sdebug_uuid_ctl == 2) {
4912 if (got_shared_uuid)
4913 devip->lu_name = shared_uuid;
4915 uuid_gen(&shared_uuid);
4916 got_shared_uuid = true;
4917 devip->lu_name = shared_uuid;
4920 devip->sdbg_host = sdbg_host;
4921 if (sdeb_zbc_in_use) {
4922 devip->zmodel = sdeb_zbc_model;
4923 if (sdebug_device_create_zones(devip)) {
4928 devip->zmodel = BLK_ZONED_NONE;
4930 devip->sdbg_host = sdbg_host;
4931 devip->create_ts = ktime_get_boottime();
4932 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4933 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4938 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4940 struct sdebug_host_info *sdbg_host;
4941 struct sdebug_dev_info *open_devip = NULL;
4942 struct sdebug_dev_info *devip;
4944 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4946 pr_err("Host info NULL\n");
4949 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4950 if ((devip->used) && (devip->channel == sdev->channel) &&
4951 (devip->target == sdev->id) &&
4952 (devip->lun == sdev->lun))
4955 if ((!devip->used) && (!open_devip))
4959 if (!open_devip) { /* try and make a new one */
4960 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4962 pr_err("out of memory at line %d\n", __LINE__);
4967 open_devip->channel = sdev->channel;
4968 open_devip->target = sdev->id;
4969 open_devip->lun = sdev->lun;
4970 open_devip->sdbg_host = sdbg_host;
4971 atomic_set(&open_devip->num_in_q, 0);
4972 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4973 open_devip->used = true;
4977 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4980 pr_info("slave_alloc <%u %u %u %llu>\n",
4981 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4985 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4987 struct sdebug_dev_info *devip =
4988 (struct sdebug_dev_info *)sdp->hostdata;
4991 pr_info("slave_configure <%u %u %u %llu>\n",
4992 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4993 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4994 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4995 if (devip == NULL) {
4996 devip = find_build_dev_info(sdp);
4998 return 1; /* no resources, will be marked offline */
5000 sdp->hostdata = devip;
5002 sdp->no_uld_attach = 1;
5003 config_cdb_len(sdp);
5007 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5009 struct sdebug_dev_info *devip =
5010 (struct sdebug_dev_info *)sdp->hostdata;
5013 pr_info("slave_destroy <%u %u %u %llu>\n",
5014 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5016 /* make this slot available for re-use */
5017 devip->used = false;
5018 sdp->hostdata = NULL;
5022 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5023 enum sdeb_defer_type defer_t)
5027 if (defer_t == SDEB_DEFER_HRT)
5028 hrtimer_cancel(&sd_dp->hrt);
5029 else if (defer_t == SDEB_DEFER_WQ)
5030 cancel_work_sync(&sd_dp->ew.work);
5033 /* If @cmnd found, deletes its timer or work queue and returns true; else
 * returns false. */
5035 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5037 unsigned long iflags;
5038 int j, k, qmax, r_qmax;
5039 enum sdeb_defer_type l_defer_t;
5040 struct sdebug_queue *sqp;
5041 struct sdebug_queued_cmd *sqcp;
5042 struct sdebug_dev_info *devip;
5043 struct sdebug_defer *sd_dp;
5045 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5046 spin_lock_irqsave(&sqp->qc_lock, iflags);
5047 qmax = sdebug_max_queue;
5048 r_qmax = atomic_read(&retired_max_queue);
5051 for (k = 0; k < qmax; ++k) {
5052 if (test_bit(k, sqp->in_use_bm)) {
5053 sqcp = &sqp->qc_arr[k];
5054 if (cmnd != sqcp->a_cmnd)
5057 devip = (struct sdebug_dev_info *)
5058 cmnd->device->hostdata;
5060 atomic_dec(&devip->num_in_q);
5061 sqcp->a_cmnd = NULL;
5062 sd_dp = sqcp->sd_dp;
5064 l_defer_t = sd_dp->defer_t;
5065 sd_dp->defer_t = SDEB_DEFER_NONE;
5067 l_defer_t = SDEB_DEFER_NONE;
5068 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5069 stop_qc_helper(sd_dp, l_defer_t);
5070 clear_bit(k, sqp->in_use_bm);
5074 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5079 /* Deletes (stops) timers or work queues of all queued commands */
5080 static void stop_all_queued(void)
5082 unsigned long iflags;
5084 enum sdeb_defer_type l_defer_t;
5085 struct sdebug_queue *sqp;
5086 struct sdebug_queued_cmd *sqcp;
5087 struct sdebug_dev_info *devip;
5088 struct sdebug_defer *sd_dp;
5090 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5091 spin_lock_irqsave(&sqp->qc_lock, iflags);
5092 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5093 if (test_bit(k, sqp->in_use_bm)) {
5094 sqcp = &sqp->qc_arr[k];
5095 if (sqcp->a_cmnd == NULL)
5097 devip = (struct sdebug_dev_info *)
5098 sqcp->a_cmnd->device->hostdata;
5100 atomic_dec(&devip->num_in_q);
5101 sqcp->a_cmnd = NULL;
5102 sd_dp = sqcp->sd_dp;
5104 l_defer_t = sd_dp->defer_t;
5105 sd_dp->defer_t = SDEB_DEFER_NONE;
5107 l_defer_t = SDEB_DEFER_NONE;
5108 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5109 stop_qc_helper(sd_dp, l_defer_t);
5110 clear_bit(k, sqp->in_use_bm);
5111 spin_lock_irqsave(&sqp->qc_lock, iflags);
5114 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5118 /* Free queued command memory on heap */
5119 static void free_all_queued(void)
5122 struct sdebug_queue *sqp;
5123 struct sdebug_queued_cmd *sqcp;
5125 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5126 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5127 sqcp = &sqp->qc_arr[k];
5134 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5140 ok = stop_queued_cmnd(SCpnt);
5141 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5142 sdev_printk(KERN_INFO, SCpnt->device,
5143 "%s: command%s found\n", __func__,
5149 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5152 if (SCpnt && SCpnt->device) {
5153 struct scsi_device *sdp = SCpnt->device;
5154 struct sdebug_dev_info *devip =
5155 (struct sdebug_dev_info *)sdp->hostdata;
5157 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5158 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5160 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5165 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5167 struct sdebug_host_info *sdbg_host;
5168 struct sdebug_dev_info *devip;
5169 struct scsi_device *sdp;
5170 struct Scsi_Host *hp;
5173 ++num_target_resets;
5176 sdp = SCpnt->device;
5179 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5180 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5184 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5186 list_for_each_entry(devip,
5187 &sdbg_host->dev_info_list,
5189 if (devip->target == sdp->id) {
5190 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5194 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5195 sdev_printk(KERN_INFO, sdp,
5196 "%s: %d device(s) found in target\n", __func__, k);
5201 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5203 struct sdebug_host_info *sdbg_host;
5204 struct sdebug_dev_info *devip;
5205 struct scsi_device *sdp;
5206 struct Scsi_Host *hp;
5210 if (!(SCpnt && SCpnt->device))
5212 sdp = SCpnt->device;
5213 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5214 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5217 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5219 list_for_each_entry(devip,
5220 &sdbg_host->dev_info_list,
5222 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5227 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5228 sdev_printk(KERN_INFO, sdp,
5229 "%s: %d device(s) found in host\n", __func__, k);
5234 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5236 struct sdebug_host_info *sdbg_host;
5237 struct sdebug_dev_info *devip;
5241 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5242 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5243 spin_lock(&sdebug_host_list_lock);
5244 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5245 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5247 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5251 spin_unlock(&sdebug_host_list_lock);
5253 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5254 sdev_printk(KERN_INFO, SCpnt->device,
5255 "%s: %d device(s) found\n", __func__, k);
5259 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5261 struct msdos_partition *pp;
5262 int starts[SDEBUG_MAX_PARTS + 2];
5263 int sectors_per_part, num_sectors, k;
5264 int heads_by_sects, start_sec, end_sec;
5266 /* assume partition table already zeroed */
5267 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5269 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5270 sdebug_num_parts = SDEBUG_MAX_PARTS;
5271 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5273 num_sectors = (int)sdebug_store_sectors;
5274 sectors_per_part = (num_sectors - sdebug_sectors_per)
5276 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5277 starts[0] = sdebug_sectors_per;
5278 for (k = 1; k < sdebug_num_parts; ++k)
5279 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5281 starts[sdebug_num_parts] = num_sectors;
5282 starts[sdebug_num_parts + 1] = 0;
5284 ramp[510] = 0x55; /* magic partition markings */
5286 pp = (struct msdos_partition *)(ramp + 0x1be);
5287 for (k = 0; starts[k + 1]; ++k, ++pp) {
5288 start_sec = starts[k];
5289 end_sec = starts[k + 1] - 1;
5292 pp->cyl = start_sec / heads_by_sects;
5293 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5294 / sdebug_sectors_per;
5295 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5297 pp->end_cyl = end_sec / heads_by_sects;
5298 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5299 / sdebug_sectors_per;
5300 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5302 pp->start_sect = cpu_to_le32(start_sec);
5303 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5304 pp->sys_ind = 0x83; /* plain Linux partition */
5308 static void block_unblock_all_queues(bool block)
5311 struct sdebug_queue *sqp;
5313 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5314 atomic_set(&sqp->blocked, (int)block);
5317 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5318 * commands will be processed normally before triggers occur.
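 *
 * Worked example: with sdebug_every_nth at 100 and sdebug_cmnd_count at
 * 250, the count is rounded down to 200; commands 201 to 299 (i.e.
 * abs(every_nth) - 1 of them) then complete normally before the next
 * trigger fires at 300.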
5320 static void tweak_cmnd_count(void)
5324 modulo = abs(sdebug_every_nth);
5327 block_unblock_all_queues(true);
5328 count = atomic_read(&sdebug_cmnd_count);
5329 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5330 block_unblock_all_queues(false);
5333 static void clear_queue_stats(void)
5335 atomic_set(&sdebug_cmnd_count, 0);
5336 atomic_set(&sdebug_completions, 0);
5337 atomic_set(&sdebug_miss_cpus, 0);
5338 atomic_set(&sdebug_a_tsf, 0);
5341 static bool inject_on_this_cmd(void)
5343 if (sdebug_every_nth == 0)
5345 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5348 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5350 /* Complete the processing of the thread that queued a SCSI command to this
5351 * driver. It either completes the command by calling cmnd_done() or
5352  * schedules an hrtimer or work queue item and then returns 0. Returns
5353 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5355 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5357 int (*pfp)(struct scsi_cmnd *,
5358 struct sdebug_dev_info *),
5359 int delta_jiff, int ndelay)
5362 bool inject = false;
5363 int k, num_in_q, qdepth;
5364 unsigned long iflags;
5365 u64 ns_from_boot = 0;
5366 struct sdebug_queue *sqp;
5367 struct sdebug_queued_cmd *sqcp;
5368 struct scsi_device *sdp;
5369 struct sdebug_defer *sd_dp;
5371 if (unlikely(devip == NULL)) {
5372 if (scsi_result == 0)
5373 scsi_result = DID_NO_CONNECT << 16;
5374 goto respond_in_thread;
5378 if (delta_jiff == 0)
5379 goto respond_in_thread;
5381 sqp = get_queue(cmnd);
5382 spin_lock_irqsave(&sqp->qc_lock, iflags);
5383 if (unlikely(atomic_read(&sqp->blocked))) {
5384 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5385 return SCSI_MLQUEUE_HOST_BUSY;
5387 num_in_q = atomic_read(&devip->num_in_q);
5388 qdepth = cmnd->device->queue_depth;
5389 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5391 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5392 goto respond_in_thread;
5394 scsi_result = device_qfull_result;
5395 } else if (unlikely(sdebug_every_nth &&
5396 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5397 (scsi_result == 0))) {
5398 if ((num_in_q == (qdepth - 1)) &&
5399 (atomic_inc_return(&sdebug_a_tsf) >=
5400 abs(sdebug_every_nth))) {
5401 atomic_set(&sdebug_a_tsf, 0);
5403 scsi_result = device_qfull_result;
5407 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5408 if (unlikely(k >= sdebug_max_queue)) {
5409 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5411 goto respond_in_thread;
5412 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5413 scsi_result = device_qfull_result;
5414 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5415 sdev_printk(KERN_INFO, sdp,
5416 "%s: max_queue=%d exceeded, %s\n",
5417 __func__, sdebug_max_queue,
5418 (scsi_result ? "status: TASK SET FULL" :
5419 "report: host busy"));
5421 goto respond_in_thread;
5423 return SCSI_MLQUEUE_HOST_BUSY;
5425 set_bit(k, sqp->in_use_bm);
5426 atomic_inc(&devip->num_in_q);
5427 sqcp = &sqp->qc_arr[k];
5428 sqcp->a_cmnd = cmnd;
5429 cmnd->host_scribble = (unsigned char *)sqcp;
5430 sd_dp = sqcp->sd_dp;
5431 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5433 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5435 atomic_dec(&devip->num_in_q);
5436 clear_bit(k, sqp->in_use_bm);
5437 return SCSI_MLQUEUE_HOST_BUSY;
5444 /* Set the hostwide tag */
5445 if (sdebug_host_max_queue)
5446 sd_dp->hc_idx = get_tag(cmnd);
5448 if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5449 ns_from_boot = ktime_get_boottime_ns();
5451 /* one of the resp_*() response functions is called here */
5452 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5453 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5454 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5455 delta_jiff = ndelay = 0;
5457 if (cmnd->result == 0 && scsi_result != 0)
5458 cmnd->result = scsi_result;
5459 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5460 if (atomic_read(&sdeb_inject_pending)) {
5461 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5462 atomic_set(&sdeb_inject_pending, 0);
5463 cmnd->result = check_condition_result;
5467 if (unlikely(sdebug_verbose && cmnd->result))
5468 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5469 __func__, cmnd->result);
5471 if (delta_jiff > 0 || ndelay > 0) {
5474 if (delta_jiff > 0) {
5475 u64 ns = jiffies_to_nsecs(delta_jiff);
5477 if (sdebug_random && ns < U32_MAX) {
5478 ns = prandom_u32_max((u32)ns);
5479 } else if (sdebug_random) {
5480 ns >>= 12; /* scale to 4 usec precision */
5481 if (ns < U32_MAX) /* over 4 hours max */
5482 ns = prandom_u32_max((u32)ns);
5485 kt = ns_to_ktime(ns);
5486 } else { /* ndelay has a 4.2 second max */
5487 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5489 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5490 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5492 if (kt <= d) { /* elapsed duration >= kt */
5493 sqcp->a_cmnd = NULL;
5494 atomic_dec(&devip->num_in_q);
5495 clear_bit(k, sqp->in_use_bm);
5498 /* call scsi_done() from this thread */
5499 cmnd->scsi_done(cmnd);
5502 /* otherwise reduce kt by elapsed time */
5506 if (!sd_dp->init_hrt) {
5507 sd_dp->init_hrt = true;
5508 sqcp->sd_dp = sd_dp;
5509 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5510 HRTIMER_MODE_REL_PINNED);
5511 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5512 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5515 if (sdebug_statistics)
5516 sd_dp->issuing_cpu = raw_smp_processor_id();
5517 sd_dp->defer_t = SDEB_DEFER_HRT;
5518 /* schedule the invocation of scsi_done() for a later time */
5519 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5520 } else { /* jdelay < 0, use work queue */
5521 if (!sd_dp->init_wq) {
5522 sd_dp->init_wq = true;
5523 sqcp->sd_dp = sd_dp;
5524 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5526 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5528 if (sdebug_statistics)
5529 sd_dp->issuing_cpu = raw_smp_processor_id();
5530 sd_dp->defer_t = SDEB_DEFER_WQ;
5531 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5532 atomic_read(&sdeb_inject_pending)))
5533 sd_dp->aborted = true;
5534 schedule_work(&sd_dp->ew.work);
5535 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5536 atomic_read(&sdeb_inject_pending))) {
5537 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
5538 blk_abort_request(cmnd->request);
5539 atomic_set(&sdeb_inject_pending, 0);
5542 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5543 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5544 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5547 respond_in_thread: /* call back to mid-layer using invocation thread */
5548 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5549 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5550 if (cmnd->result == 0 && scsi_result != 0)
5551 cmnd->result = scsi_result;
5552 cmnd->scsi_done(cmnd);
5556 /* Note: The following macros create attribute files in the
5557 /sys/module/scsi_debug/parameters directory. Unfortunately this
5558 driver is unaware of a change and cannot trigger auxiliary actions
5559 as it can when the corresponding attribute in the
5560 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5562 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5563 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5564 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5565 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5566 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5567 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5568 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5569 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5570 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5571 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5572 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5573 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5574 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5575 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5576 module_param_string(inq_product, sdebug_inq_product_id,
5577 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5578 module_param_string(inq_rev, sdebug_inq_product_rev,
5579 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5580 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5581 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5582 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5583 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5584 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5585 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5586 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5587 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5588 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5589 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5591 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5593 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5594 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5595 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5596 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5597 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5598 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5599 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5600 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5601 module_param_named(per_host_store, sdebug_per_host_store, bool,
5603 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5604 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5605 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5606 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5607 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5608 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5609 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5610 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5611 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5612 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5613 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5614 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5615 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5616 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5617 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5618 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5619 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5621 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5622 module_param_named(write_same_length, sdebug_write_same_length, int,
5624 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5625 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5626 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5627 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5629 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5630 MODULE_DESCRIPTION("SCSI debug adapter driver");
5631 MODULE_LICENSE("GPL");
5632 MODULE_VERSION(SDEBUG_VERSION);
5634 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5635 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5636 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5637 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5638 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5639 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5640 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5641 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5642 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5643 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5644 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5645 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5646 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5647 MODULE_PARM_DESC(host_max_queue,
5648 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5649 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5650 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5651 SDEBUG_VERSION "\")");
5652 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5653 MODULE_PARM_DESC(lbprz,
5654 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5655 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5656 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5657 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5658 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5659 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5660 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5661 MODULE_PARM_DESC(medium_error_count, "count of sectors on which to return a follow-on MEDIUM error");
5662 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5663 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5664 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5665 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5666 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5667 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5668 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5669 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5670 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5671 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5672 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5673 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5674 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5675 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5676 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5677 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5678 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5679 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5680 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5681 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5682 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5683 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5684 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5685 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5686 MODULE_PARM_DESC(uuid_ctl,
5687 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5688 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5689 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5690 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5691 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5692 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5693 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5694 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5695 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
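/* A hypothetical invocation combining several of the above parameters:
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 lbpu=1
 *
 * creates one host with 2 targets of 4 logical units each, backed by a
 * shared 256 MiB ram store with the UNMAP command enabled.
 */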
5697 #define SDEBUG_INFO_LEN 256
5698 static char sdebug_info[SDEBUG_INFO_LEN];
5700 static const char *scsi_debug_info(struct Scsi_Host *shp)
5704 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5705 my_name, SDEBUG_VERSION, sdebug_version_date);
5706 if (k >= (SDEBUG_INFO_LEN - 1))
5708 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5709 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5710 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5711 "statistics", (int)sdebug_statistics);
5715 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5716 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5721 int minLen = length > 15 ? 15 : length;
5723 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5725 memcpy(arr, buffer, minLen);
5727 if (1 != sscanf(arr, "%d", &opts))
5730 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5731 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5732 if (sdebug_every_nth != 0)
5737 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5738 * same for each scsi_debug host (if more than one). Some of the counters
5739  * output are not atomic so they might be inaccurate on a busy system. */
5740 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5743 struct sdebug_queue *sqp;
5744 struct sdebug_host_info *sdhp;
5746 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5747 SDEBUG_VERSION, sdebug_version_date);
5748 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5749 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5750 sdebug_opts, sdebug_every_nth);
5751 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5752 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5753 sdebug_sector_size, "bytes");
5754 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5755 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5757 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5758 num_dev_resets, num_target_resets, num_bus_resets,
5760 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5761 dix_reads, dix_writes, dif_errors);
5762 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5764 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5765 atomic_read(&sdebug_cmnd_count),
5766 atomic_read(&sdebug_completions),
5767 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5768 atomic_read(&sdebug_a_tsf));
5770 seq_printf(m, "submit_queues=%d\n", submit_queues);
5771 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5772 seq_printf(m, " queue %d:\n", j);
5773 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5774 if (f != sdebug_max_queue) {
5775 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5776 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5777 "first,last bits", f, l);
5781 seq_printf(m, "this host_no=%d\n", host->host_no);
5782 if (!xa_empty(per_store_ap)) {
5785 unsigned long l_idx;
5786 struct sdeb_store_info *sip;
5788 seq_puts(m, "\nhost list:\n");
5790 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5792 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5793 sdhp->shost->host_no, idx);
5796 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5797 sdeb_most_recent_idx);
5799 xa_for_each(per_store_ap, l_idx, sip) {
5800 niu = xa_get_mark(per_store_ap, l_idx,
5801 SDEB_XA_NOT_IN_USE);
5803 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5804 (niu ? " not_in_use" : ""));
5811 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5813 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5815 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5816 * of delay is jiffies.
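 * For example, assuming the sysfs attribute created by this driver:
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * makes responses immediate, provided no commands are currently queued.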
5818 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5823 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5825 if (sdebug_jdelay != jdelay) {
5827 struct sdebug_queue *sqp;
5829 block_unblock_all_queues(true);
5830 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5832 k = find_first_bit(sqp->in_use_bm,
5834 if (k != sdebug_max_queue) {
5835 res = -EBUSY; /* queued commands */
5840 sdebug_jdelay = jdelay;
5843 block_unblock_all_queues(false);
5849 static DRIVER_ATTR_RW(delay);
5851 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5853 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5855 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5856 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5857 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5862 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5863 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5865 if (sdebug_ndelay != ndelay) {
5867 struct sdebug_queue *sqp;
5869 block_unblock_all_queues(true);
5870 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5872 k = find_first_bit(sqp->in_use_bm,
5874 if (k != sdebug_max_queue) {
5875 res = -EBUSY; /* queued commands */
5880 sdebug_ndelay = ndelay;
5881 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5884 block_unblock_all_queues(false);
5890 static DRIVER_ATTR_RW(ndelay);
5892 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5894 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5897 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5903 if (sscanf(buf, "%10s", work) == 1) {
5904 if (strncasecmp(work, "0x", 2) == 0) {
5905 if (kstrtoint(work + 2, 16, &opts) == 0)
5906 goto opts_done;
5907 } else {
5908 if (kstrtoint(work, 10, &opts) == 0)
5909 goto opts_done;
5912 return -EINVAL;
5913 opts_done:
5914 sdebug_opts = opts;
5915 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5916 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5920 static DRIVER_ATTR_RW(opts);
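/*
 * Illustrative aside: opts_store() above accepts either "0x..." hex or
 * plain decimal input. A hypothetical stand-alone parser with the same
 * behaviour:
 */
static int sdeb_parse_opts(const char *work, int *opts)
{
	if (strncasecmp(work, "0x", 2) == 0)
		return kstrtoint(work + 2, 16, opts);
	return kstrtoint(work, 10, opts);
}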
5922 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5924 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5926 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5931 /* Cannot change from or to TYPE_ZBC with sysfs */
5932 if (sdebug_ptype == TYPE_ZBC)
5933 return -EINVAL;
5935 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5936 sdebug_ptype = n;
5937 return count;
5939 return -EINVAL;
5943 static DRIVER_ATTR_RW(ptype);
5945 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5947 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5949 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5954 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5955 sdebug_dsense = n;
5956 return count;
5958 return -EINVAL;
5960 static DRIVER_ATTR_RW(dsense);
5962 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
5964 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
5966 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
5971 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5972 bool want_store = (n == 0);
5973 struct sdebug_host_info *sdhp;
5975 n = (n > 0);
5976 sdebug_fake_rw = (sdebug_fake_rw > 0);
5977 if (sdebug_fake_rw == n)
5978 return count; /* not transitioning so do nothing */
5980 if (want_store) { /* 1 --> 0 transition, set up store */
5981 if (sdeb_first_idx < 0) {
5982 idx = sdebug_add_store();
5983 if (idx < 0)
5984 return idx;
5985 } else {
5986 idx = sdeb_first_idx;
5987 xa_clear_mark(per_store_ap, idx,
5988 SDEB_XA_NOT_IN_USE);
5989 }
5990 /* make all hosts use same store */
5991 list_for_each_entry(sdhp, &sdebug_host_list,
5992 host_list) {
5993 if (sdhp->si_idx != idx) {
5994 xa_set_mark(per_store_ap, sdhp->si_idx,
5995 SDEB_XA_NOT_IN_USE);
5996 sdhp->si_idx = idx;
5997 }
5998 }
5999 sdeb_most_recent_idx = idx;
6000 } else { /* 0 --> 1 transition is trigger for shrink */
6001 sdebug_erase_all_stores(true /* apart from first */);
6002 }
6003 sdebug_fake_rw = n;
6004 return count;
6008 static DRIVER_ATTR_RW(fake_rw);
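/*
 * Illustrative summary of the fake_rw transitions handled above:
 *   1 --> 0 : re-use the first store if one exists, otherwise allocate
 *             one, then point every host at that shared store.
 *   0 --> 1 : erase all stores apart from the first (shrink trigger).
 * Writing the current value is a no-op ("not transitioning").
 */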
6010 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6012 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6014 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6019 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6020 sdebug_no_lun_0 = n;
6021 return count;
6023 return -EINVAL;
6025 static DRIVER_ATTR_RW(no_lun_0);
6027 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6029 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6031 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6036 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6037 sdebug_num_tgts = n;
6038 sdebug_max_tgts_luns();
6039 return count;
6041 return -EINVAL;
6043 static DRIVER_ATTR_RW(num_tgts);
6045 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6047 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6049 static DRIVER_ATTR_RO(dev_size_mb);
6051 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6053 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6056 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6061 if (kstrtobool(buf, &v))
6062 return -EINVAL;
6064 sdebug_per_host_store = v;
6065 return count;
6067 static DRIVER_ATTR_RW(per_host_store);
6069 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6071 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6073 static DRIVER_ATTR_RO(num_parts);
6075 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6077 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6079 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6085 if (sscanf(buf, "%10s", work) == 1) {
6086 if (strncasecmp(work, "0x", 2) == 0) {
6087 if (kstrtoint(work + 2, 16, &nth) == 0)
6088 goto every_nth_done;
6089 } else {
6090 if (kstrtoint(work, 10, &nth) == 0)
6091 goto every_nth_done;
6094 return -EINVAL;
6096 every_nth_done:
6097 sdebug_every_nth = nth;
6098 if (nth && !sdebug_statistics) {
6099 pr_info("every_nth needs statistics=1, set it\n");
6100 sdebug_statistics = true;
6102 tweak_cmnd_count();
6103 return count;
6105 static DRIVER_ATTR_RW(every_nth);
6107 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6109 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6111 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6117 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6118 if (n > 256) {
6119 pr_warn("max_luns can be no more than 256\n");
6120 return -EINVAL;
6122 changed = (sdebug_max_luns != n);
6123 sdebug_max_luns = n;
6124 sdebug_max_tgts_luns();
6125 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6126 struct sdebug_host_info *sdhp;
6127 struct sdebug_dev_info *dp;
6129 spin_lock(&sdebug_host_list_lock);
6130 list_for_each_entry(sdhp, &sdebug_host_list,
6131 host_list) {
6132 list_for_each_entry(dp, &sdhp->dev_info_list,
6133 dev_list) {
6134 set_bit(SDEBUG_UA_LUNS_CHANGED,
6135 dp->uas_bm);
6138 spin_unlock(&sdebug_host_list_lock);
6140 return count;
6142 return -EINVAL;
6144 static DRIVER_ATTR_RW(max_luns);
6146 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6148 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6150 /* N.B. max_queue can be changed while there are queued commands. In flight
6151 * commands beyond the new max_queue will be completed. */
6152 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6156 struct sdebug_queue *sqp;
6158 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6159 (n <= SDEBUG_CANQUEUE) &&
6160 (sdebug_host_max_queue == 0)) {
6161 block_unblock_all_queues(true);
6162 k = 0;
6163 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6164 ++j, ++sqp) {
6165 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6166 if (a > k)
6167 k = a;
6168 }
6169 sdebug_max_queue = n;
6170 if (k == SDEBUG_CANQUEUE)
6171 atomic_set(&retired_max_queue, 0);
6172 else if (k >= n)
6173 atomic_set(&retired_max_queue, k + 1);
6174 else
6175 atomic_set(&retired_max_queue, 0);
6176 block_unblock_all_queues(false);
6177 return count;
6179 return -EINVAL;
6181 static DRIVER_ATTR_RW(max_queue);
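/*
 * Illustrative aside: the retired_max_queue rule applied in
 * max_queue_store() above, as a hypothetical helper. k is the highest
 * in-use tag found across the queues (find_last_bit() returns
 * SDEBUG_CANQUEUE when no bit is set), n is the new max_queue.
 */
static inline int sdeb_retired_mq(int k, int n)
{
	if (k == SDEBUG_CANQUEUE)	/* nothing in flight */
		return 0;
	return (k >= n) ? k + 1 : 0;	/* let tags beyond n complete */
}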
6183 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6185 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6189 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6190 * in range [0, sdebug_host_max_queue), we can't change it.
6192 static DRIVER_ATTR_RO(host_max_queue);
6194 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6196 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6198 static DRIVER_ATTR_RO(no_uld);
6200 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6202 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6204 static DRIVER_ATTR_RO(scsi_level);
6206 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6208 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6210 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6216 /* Ignore capacity change for ZBC drives for now */
6217 if (sdeb_zbc_in_use)
6218 return -ENOTSUPP;
6220 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6221 changed = (sdebug_virtual_gb != n);
6222 sdebug_virtual_gb = n;
6223 sdebug_capacity = get_sdebug_capacity();
6224 if (changed) {
6225 struct sdebug_host_info *sdhp;
6226 struct sdebug_dev_info *dp;
6228 spin_lock(&sdebug_host_list_lock);
6229 list_for_each_entry(sdhp, &sdebug_host_list,
6230 host_list) {
6231 list_for_each_entry(dp, &sdhp->dev_info_list,
6232 dev_list) {
6233 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6234 dp->uas_bm);
6237 spin_unlock(&sdebug_host_list_lock);
6239 return count;
6241 return -EINVAL;
6243 static DRIVER_ATTR_RW(virtual_gb);
6245 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6247 /* absolute number of hosts currently active is what is shown */
6248 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6251 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6254 bool found;
6255 unsigned long idx;
6256 struct sdeb_store_info *sip;
6257 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6260 if (sscanf(buf, "%d", &delta_hosts) != 1)
6261 return -EINVAL;
6262 if (delta_hosts > 0) {
6263 do {
6264 found = false;
6265 if (want_phs) {
6266 xa_for_each_marked(per_store_ap, idx, sip,
6267 SDEB_XA_NOT_IN_USE) {
6268 sdeb_most_recent_idx = (int)idx;
6269 found = true;
6270 break;
6271 }
6272 if (found) /* re-use case */
6273 sdebug_add_host_helper((int)idx);
6274 else
6275 sdebug_do_add_host(true);
6276 } else {
6277 sdebug_do_add_host(false);
6278 }
6279 } while (--delta_hosts);
6280 } else if (delta_hosts < 0) {
6281 do {
6282 sdebug_do_remove_host(false);
6283 } while (++delta_hosts);
6285 return count;
6287 static DRIVER_ATTR_RW(add_host);
6289 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6291 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6293 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6298 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6299 sdebug_vpd_use_hostno = n;
6300 return count;
6302 return -EINVAL;
6304 static DRIVER_ATTR_RW(vpd_use_hostno);
6306 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6308 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6310 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6315 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6316 if (n > 0)
6317 sdebug_statistics = true;
6318 else {
6319 clear_queue_stats();
6320 sdebug_statistics = false;
6322 return count;
6324 return -EINVAL;
6326 static DRIVER_ATTR_RW(statistics);
6328 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6330 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6332 static DRIVER_ATTR_RO(sector_size);
6334 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6336 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6338 static DRIVER_ATTR_RO(submit_queues);
6340 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6342 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6344 static DRIVER_ATTR_RO(dix);
6346 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6348 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6350 static DRIVER_ATTR_RO(dif);
6352 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6354 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6356 static DRIVER_ATTR_RO(guard);
6358 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6360 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6362 static DRIVER_ATTR_RO(ato);
6364 static ssize_t map_show(struct device_driver *ddp, char *buf)
6368 if (!scsi_debug_lbp())
6369 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6370 sdebug_store_sectors);
6372 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6373 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6375 if (sip)
6376 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6377 (int)map_size, sip->map_storep);
6379 buf[count++] = '\n';
6381 return count;
6384 static DRIVER_ATTR_RO(map);
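/*
 * Illustrative aside: the "%*pbl" specifier used in map_show() prints a
 * bitmap as a ranged list. A minimal, self-contained sketch:
 */
static void sdeb_print_map_example(void)
{
	DECLARE_BITMAP(bm, 8);

	bitmap_zero(bm, 8);
	bitmap_set(bm, 0, 2);	/* blocks 0 and 1 */
	bitmap_set(bm, 5, 1);	/* block 5 */
	pr_info("example map: %*pbl\n", 8, bm);	/* prints "0-1,5" */
}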
6386 static ssize_t random_show(struct device_driver *ddp, char *buf)
6388 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6391 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6396 if (kstrtobool(buf, &v))
6397 return -EINVAL;
6399 sdebug_random = v;
6400 return count;
6402 static DRIVER_ATTR_RW(random);
6404 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6406 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6408 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6413 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6414 sdebug_removable = (n > 0);
6415 return count;
6417 return -EINVAL;
6419 static DRIVER_ATTR_RW(removable);
6421 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6423 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6425 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6426 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6431 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6432 sdebug_host_lock = (n > 0);
6433 return count;
6435 return -EINVAL;
6437 static DRIVER_ATTR_RW(host_lock);
6439 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6441 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6443 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6448 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6449 sdebug_strict = (n > 0);
6450 return count;
6452 return -EINVAL;
6454 static DRIVER_ATTR_RW(strict);
6456 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6458 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6460 static DRIVER_ATTR_RO(uuid_ctl);
6462 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6464 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6466 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6471 ret = kstrtoint(buf, 0, &n);
6472 if (ret)
6473 return ret;
6474 sdebug_cdb_len = n;
6475 all_config_cdb_len();
6476 return count;
6478 static DRIVER_ATTR_RW(cdb_len);
6480 static const char * const zbc_model_strs_a[] = {
6481 [BLK_ZONED_NONE] = "none",
6482 [BLK_ZONED_HA] = "host-aware",
6483 [BLK_ZONED_HM] = "host-managed",
6486 static const char * const zbc_model_strs_b[] = {
6487 [BLK_ZONED_NONE] = "no",
6488 [BLK_ZONED_HA] = "aware",
6489 [BLK_ZONED_HM] = "managed",
6492 static const char * const zbc_model_strs_c[] = {
6493 [BLK_ZONED_NONE] = "0",
6494 [BLK_ZONED_HA] = "1",
6495 [BLK_ZONED_HM] = "2",
6498 static int sdeb_zbc_model_str(const char *cp)
6500 int res = sysfs_match_string(zbc_model_strs_a, cp);
6502 if (res < 0) {
6503 res = sysfs_match_string(zbc_model_strs_b, cp);
6504 if (res < 0) {
6505 res = sysfs_match_string(zbc_model_strs_c, cp);
6506 if (res < 0)
6507 return -EINVAL;
6510 return res;
6513 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6515 return scnprintf(buf, PAGE_SIZE, "%s\n",
6516 zbc_model_strs_a[sdeb_zbc_model]);
6518 static DRIVER_ATTR_RO(zbc);
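/*
 * Illustrative note on sdeb_zbc_model_str() above: the three alias tables
 * let "host-managed", "managed" and "2" all resolve to BLK_ZONED_HM, so
 * e.g. sdeb_zbc_model_str("managed") returns BLK_ZONED_HM.
 */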
6520 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6522 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6524 static DRIVER_ATTR_RO(tur_ms_to_ready);
6526 /* Note: The following array creates attribute files in the
6527 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6528 files (over those found in the /sys/module/scsi_debug/parameters
6529 directory) is that auxiliary actions can be triggered when an attribute
6530 is changed. For example see: add_host_store() above.
6533 static struct attribute *sdebug_drv_attrs[] = {
6534 &driver_attr_delay.attr,
6535 &driver_attr_opts.attr,
6536 &driver_attr_ptype.attr,
6537 &driver_attr_dsense.attr,
6538 &driver_attr_fake_rw.attr,
6539 &driver_attr_host_max_queue.attr,
6540 &driver_attr_no_lun_0.attr,
6541 &driver_attr_num_tgts.attr,
6542 &driver_attr_dev_size_mb.attr,
6543 &driver_attr_num_parts.attr,
6544 &driver_attr_every_nth.attr,
6545 &driver_attr_max_luns.attr,
6546 &driver_attr_max_queue.attr,
6547 &driver_attr_no_uld.attr,
6548 &driver_attr_scsi_level.attr,
6549 &driver_attr_virtual_gb.attr,
6550 &driver_attr_add_host.attr,
6551 &driver_attr_per_host_store.attr,
6552 &driver_attr_vpd_use_hostno.attr,
6553 &driver_attr_sector_size.attr,
6554 &driver_attr_statistics.attr,
6555 &driver_attr_submit_queues.attr,
6556 &driver_attr_dix.attr,
6557 &driver_attr_dif.attr,
6558 &driver_attr_guard.attr,
6559 &driver_attr_ato.attr,
6560 &driver_attr_map.attr,
6561 &driver_attr_random.attr,
6562 &driver_attr_removable.attr,
6563 &driver_attr_host_lock.attr,
6564 &driver_attr_ndelay.attr,
6565 &driver_attr_strict.attr,
6566 &driver_attr_uuid_ctl.attr,
6567 &driver_attr_cdb_len.attr,
6568 &driver_attr_tur_ms_to_ready.attr,
6569 &driver_attr_zbc.attr,
6572 ATTRIBUTE_GROUPS(sdebug_drv);
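/*
 * Illustrative aside: the pattern behind the attributes listed above is a
 * show/store pair plus DRIVER_ATTR_RW(), where the store hook can trigger
 * an auxiliary action (something a module parameter cannot do). The
 * "example" attribute below is hypothetical and not wired into
 * sdebug_drv_attrs[].
 */
static int sdebug_example;

static ssize_t example_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_example);
}

static ssize_t example_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n;

	if (kstrtoint(buf, 10, &n))
		return -EINVAL;
	sdebug_example = n;
	/* auxiliary action would go here, cf. add_host_store() above */
	return count;
}
static DRIVER_ATTR_RW(example);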
6574 static struct device *pseudo_primary;
6576 static int __init scsi_debug_init(void)
6578 bool want_store = (sdebug_fake_rw == 0);
6579 unsigned long sz;
6580 int k, ret, hosts_to_add;
6581 int idx = -1;
6583 ramdisk_lck_a[0] = &atomic_rw;
6584 ramdisk_lck_a[1] = &atomic_rw2;
6585 atomic_set(&retired_max_queue, 0);
6587 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6588 pr_warn("ndelay must be less than 1 second, ignored\n");
6589 sdebug_ndelay = 0;
6590 } else if (sdebug_ndelay > 0)
6591 sdebug_jdelay = JDELAY_OVERRIDDEN;
6593 switch (sdebug_sector_size) {
6594 case 512:
6595 case 1024:
6596 case 2048:
6597 case 4096:
6598 break;
6599 default:
6600 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6601 return -EINVAL;
6604 switch (sdebug_dif) {
6605 case T10_PI_TYPE0_PROTECTION:
6606 break;
6607 case T10_PI_TYPE1_PROTECTION:
6608 case T10_PI_TYPE2_PROTECTION:
6609 case T10_PI_TYPE3_PROTECTION:
6610 have_dif_prot = true;
6611 break;
6613 default:
6614 pr_err("dif must be 0, 1, 2 or 3\n");
6615 return -EINVAL;
6618 if (sdebug_num_tgts < 0) {
6619 pr_err("num_tgts must be >= 0\n");
6620 return -EINVAL;
6623 if (sdebug_guard > 1) {
6624 pr_err("guard must be 0 or 1\n");
6625 return -EINVAL;
6628 if (sdebug_ato > 1) {
6629 pr_err("ato must be 0 or 1\n");
6630 return -EINVAL;
6633 if (sdebug_physblk_exp > 15) {
6634 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6635 return -EINVAL;
6637 if (sdebug_max_luns > 256) {
6638 pr_warn("max_luns can be no more than 256, use default\n");
6639 sdebug_max_luns = DEF_MAX_LUNS;
6642 if (sdebug_lowest_aligned > 0x3fff) {
6643 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6644 return -EINVAL;
6647 if (submit_queues < 1) {
6648 pr_err("submit_queues must be 1 or more\n");
6649 return -EINVAL;
6652 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6653 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6654 return -EINVAL;
6657 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6658 (sdebug_host_max_queue < 0)) {
6659 pr_err("host_max_queue must be in range [0 %d]\n",
6660 SDEBUG_CANQUEUE);
6661 return -EINVAL;
6664 if (sdebug_host_max_queue &&
6665 (sdebug_max_queue != sdebug_host_max_queue)) {
6666 sdebug_max_queue = sdebug_host_max_queue;
6667 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6668 sdebug_max_queue);
6671 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6672 GFP_KERNEL);
6673 if (sdebug_q_arr == NULL)
6674 return -ENOMEM;
6675 for (k = 0; k < submit_queues; ++k)
6676 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6679 * check for host managed zoned block device specified with
6680 * ptype=0x14 or zbc=XXX.
6682 if (sdebug_ptype == TYPE_ZBC) {
6683 sdeb_zbc_model = BLK_ZONED_HM;
6684 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6685 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6686 if (k < 0) {
6687 ret = k;
6688 goto free_q_arr;
6690 sdeb_zbc_model = k;
6691 switch (sdeb_zbc_model) {
6692 case BLK_ZONED_NONE:
6693 case BLK_ZONED_HA:
6694 sdebug_ptype = TYPE_DISK;
6695 break;
6696 case BLK_ZONED_HM:
6697 sdebug_ptype = TYPE_ZBC;
6698 break;
6699 default:
6700 pr_err("Invalid ZBC model\n");
6701 ret = -EINVAL;
6702 goto free_q_arr;
6704 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6705 sdeb_zbc_in_use = true;
6706 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6707 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6710 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6711 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6712 if (sdebug_dev_size_mb < 1)
6713 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6714 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6715 sdebug_store_sectors = sz / sdebug_sector_size;
6716 sdebug_capacity = get_sdebug_capacity();
6718 /* play around with geometry, don't waste too much on track 0 */
6719 sdebug_heads = 8;
6720 sdebug_sectors_per = 32;
6721 if (sdebug_dev_size_mb >= 256)
6722 sdebug_heads = 64;
6723 else if (sdebug_dev_size_mb >= 16)
6724 sdebug_heads = 32;
6725 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6726 (sdebug_sectors_per * sdebug_heads);
6727 if (sdebug_cylinders_per >= 1024) {
6728 /* other LLDs do this; implies >= 1GB ram disk ... */
6729 sdebug_heads = 255;
6730 sdebug_sectors_per = 63;
6731 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6732 (sdebug_sectors_per * sdebug_heads);
6734 if (scsi_debug_lbp()) {
6735 sdebug_unmap_max_blocks =
6736 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6738 sdebug_unmap_max_desc =
6739 clamp(sdebug_unmap_max_desc, 0U, 256U);
6741 sdebug_unmap_granularity =
6742 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6744 if (sdebug_unmap_alignment &&
6745 sdebug_unmap_granularity <=
6746 sdebug_unmap_alignment) {
6747 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6748 ret = -EINVAL;
6749 goto free_q_arr;
6752 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6753 if (want_store) {
6754 idx = sdebug_add_store();
6755 if (idx < 0) {
6756 ret = idx;
6757 goto free_q_arr;
6761 pseudo_primary = root_device_register("pseudo_0");
6762 if (IS_ERR(pseudo_primary)) {
6763 pr_warn("root_device_register() error\n");
6764 ret = PTR_ERR(pseudo_primary);
6765 goto free_vm;
6767 ret = bus_register(&pseudo_lld_bus);
6768 if (ret < 0) {
6769 pr_warn("bus_register error: %d\n", ret);
6770 goto dev_unreg;
6772 ret = driver_register(&sdebug_driverfs_driver);
6773 if (ret < 0) {
6774 pr_warn("driver_register error: %d\n", ret);
6775 goto bus_unreg;
6778 hosts_to_add = sdebug_add_host;
6779 sdebug_add_host = 0;
6781 for (k = 0; k < hosts_to_add; k++) {
6782 if (want_store && k == 0) {
6783 ret = sdebug_add_host_helper(idx);
6784 if (ret < 0) {
6785 pr_err("add_host_helper k=%d, error=%d\n",
6786 k, -ret);
6787 break;
6789 } else {
6790 ret = sdebug_do_add_host(want_store &&
6791 sdebug_per_host_store);
6792 if (ret < 0) {
6793 pr_err("add_host k=%d error=%d\n", k, -ret);
6794 break;
6798 if (sdebug_verbose)
6799 pr_info("built %d host(s)\n", sdebug_num_hosts);
6801 return 0;
6803 bus_unreg:
6804 bus_unregister(&pseudo_lld_bus);
6805 dev_unreg:
6806 root_device_unregister(pseudo_primary);
6807 free_vm:
6808 sdebug_erase_store(idx, NULL);
6809 free_q_arr:
6810 kfree(sdebug_q_arr);
6811 return ret;
6814 static void __exit scsi_debug_exit(void)
6816 int k = sdebug_num_hosts;
6818 stop_all_queued();
6819 for (; k; k--)
6820 sdebug_do_remove_host(true);
6821 free_all_queued();
6822 driver_unregister(&sdebug_driverfs_driver);
6823 bus_unregister(&pseudo_lld_bus);
6824 root_device_unregister(pseudo_primary);
6826 sdebug_erase_all_stores(false);
6827 xa_destroy(per_store_ap);
6828 kfree(sdebug_q_arr);
6830 device_initcall(scsi_debug_init);
6831 module_exit(scsi_debug_exit);
6833 static void sdebug_release_adapter(struct device *dev)
6835 struct sdebug_host_info *sdbg_host;
6837 sdbg_host = to_sdebug_host(dev);
6838 kfree(sdbg_host);
6841 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6842 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6847 if (xa_empty(per_store_ap))
6848 return;
6849 sip = xa_load(per_store_ap, idx);
6850 if (!sip)
6851 return;
6853 vfree(sip->map_storep);
6854 vfree(sip->dif_storep);
6855 vfree(sip->storep);
6856 xa_erase(per_store_ap, idx);
6857 kfree(sip);
6860 /* Assume apart_from_first==false only in shutdown case. */
6861 static void sdebug_erase_all_stores(bool apart_from_first)
6863 unsigned long idx;
6864 struct sdeb_store_info *sip = NULL;
6866 xa_for_each(per_store_ap, idx, sip) {
6867 if (apart_from_first)
6868 apart_from_first = false;
6869 else
6870 sdebug_erase_store(idx, sip);
6871 }
6872 if (apart_from_first)
6873 sdeb_most_recent_idx = sdeb_first_idx;
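/*
 * Illustrative note: xa_for_each() visits entries in ascending index
 * order, so when apart_from_first is true the flag is consumed on the
 * first iteration and the lowest-indexed store is the one preserved.
 */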
6877 * Returns store xarray new element index (idx) if >=0 else negated errno.
6878 * Limit the number of stores to 65536.
6880 static int sdebug_add_store(void)
6882 int res;
6883 u32 n_idx;
6884 unsigned long iflags;
6885 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6886 struct sdeb_store_info *sip = NULL;
6887 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6889 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6890 if (!sip)
6891 return -ENOMEM;
6893 xa_lock_irqsave(per_store_ap, iflags);
6894 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6895 if (unlikely(res < 0)) {
6896 xa_unlock_irqrestore(per_store_ap, iflags);
6897 kfree(sip);
6898 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6899 return res;
6901 sdeb_most_recent_idx = n_idx;
6902 if (sdeb_first_idx < 0)
6903 sdeb_first_idx = n_idx;
6904 xa_unlock_irqrestore(per_store_ap, iflags);
6906 res = -ENOMEM;
6907 sip->storep = vzalloc(sz);
6908 if (!sip->storep) {
6909 pr_err("user data oom\n");
6910 goto err;
6912 if (sdebug_num_parts > 0)
6913 sdebug_build_parts(sip->storep, sz);
6915 /* DIF/DIX: what T10 calls Protection Information (PI) */
6916 if (sdebug_dix) {
6917 int dif_size;
6919 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
6920 sip->dif_storep = vmalloc(dif_size);
6922 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
6925 if (!sip->dif_storep) {
6926 pr_err("DIX oom\n");
6929 memset(sip->dif_storep, 0xff, dif_size);
6931 /* Logical Block Provisioning */
6932 if (scsi_debug_lbp()) {
6933 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
6934 sip->map_storep = vmalloc(array_size(sizeof(long),
6935 BITS_TO_LONGS(map_size)));
6937 pr_info("%lu provisioning blocks\n", map_size);
6939 if (!sip->map_storep) {
6940 pr_err("LBP map oom\n");
6944 bitmap_zero(sip->map_storep, map_size);
6946 /* Map first 1KB for partition table */
6947 if (sdebug_num_parts)
6948 map_region(sip, 0, 2);
6951 rwlock_init(&sip->macc_lck);
6952 return (int)n_idx;
6953 err:
6954 sdebug_erase_store((int)n_idx, sip);
6955 pr_warn("%s: failed, errno=%d\n", __func__, -res);
6956 return res;
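/*
 * Illustrative caller of sdebug_add_store() (hypothetical): the return
 * value doubles as the per_store_ap index (>= 0) or a negated errno.
 */
static int sdeb_demo_new_store(void)
{
	int idx = sdebug_add_store();

	if (idx < 0)
		return idx;	/* e.g. -ENOMEM */
	pr_info("new store at per_store_ap index %d\n", idx);
	return idx;
}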
6959 static int sdebug_add_host_helper(int per_host_idx)
6961 int k, devs_per_host, idx;
6962 int error = -ENOMEM;
6963 struct sdebug_host_info *sdbg_host;
6964 struct sdebug_dev_info *sdbg_devinfo, *tmp;
6966 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
6967 if (!sdbg_host)
6968 return -ENOMEM;
6969 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
6970 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
6971 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6972 sdbg_host->si_idx = idx;
6974 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
6976 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
6977 for (k = 0; k < devs_per_host; k++) {
6978 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
6979 if (!sdbg_devinfo)
6980 goto clean;
6983 spin_lock(&sdebug_host_list_lock);
6984 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
6985 spin_unlock(&sdebug_host_list_lock);
6987 sdbg_host->dev.bus = &pseudo_lld_bus;
6988 sdbg_host->dev.parent = pseudo_primary;
6989 sdbg_host->dev.release = &sdebug_release_adapter;
6990 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
6992 error = device_register(&sdbg_host->dev);
6993 if (error)
6994 goto clean;
6996 ++sdebug_num_hosts;
6997 return 0;
6999 clean:
7000 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7001 dev_list) {
7002 list_del(&sdbg_devinfo->dev_list);
7003 kfree(sdbg_devinfo->zstate);
7004 kfree(sdbg_devinfo);
7006 kfree(sdbg_host);
7007 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7008 return error;
7011 static int sdebug_do_add_host(bool mk_new_store)
7013 int ph_idx = sdeb_most_recent_idx;
7015 if (mk_new_store) {
7016 ph_idx = sdebug_add_store();
7017 if (ph_idx < 0)
7018 return ph_idx;
7020 return sdebug_add_host_helper(ph_idx);
7023 static void sdebug_do_remove_host(bool the_end)
7026 struct sdebug_host_info *sdbg_host = NULL;
7027 struct sdebug_host_info *sdbg_host2;
7029 spin_lock(&sdebug_host_list_lock);
7030 if (!list_empty(&sdebug_host_list)) {
7031 sdbg_host = list_entry(sdebug_host_list.prev,
7032 struct sdebug_host_info, host_list);
7033 idx = sdbg_host->si_idx;
7035 if (!the_end && idx >= 0) {
7036 bool unique = true;
7038 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7039 if (sdbg_host2 == sdbg_host)
7040 continue;
7041 if (idx == sdbg_host2->si_idx) {
7042 unique = false;
7043 break;
7046 if (unique) {
7047 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7048 if (idx == sdeb_most_recent_idx)
7049 --sdeb_most_recent_idx;
7053 list_del(&sdbg_host->host_list);
7054 spin_unlock(&sdebug_host_list_lock);
7056 if (!sdbg_host)
7057 return;
7059 device_unregister(&sdbg_host->dev);
7060 --sdebug_num_hosts;
7063 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7066 struct sdebug_dev_info *devip;
7068 block_unblock_all_queues(true);
7069 devip = (struct sdebug_dev_info *)sdev->hostdata;
7070 if (NULL == devip) {
7071 block_unblock_all_queues(false);
7072 return -ENODEV;
7074 num_in_q = atomic_read(&devip->num_in_q);
7076 if (qdepth < 1)
7077 qdepth = 1;
7078 /* allow to exceed max host qc_arr elements for testing */
7079 if (qdepth > SDEBUG_CANQUEUE + 10)
7080 qdepth = SDEBUG_CANQUEUE + 10;
7081 scsi_change_queue_depth(sdev, qdepth);
7083 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7084 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7085 __func__, qdepth, num_in_q);
7087 block_unblock_all_queues(false);
7088 return sdev->queue_depth;
7091 static bool fake_timeout(struct scsi_cmnd *scp)
7093 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7094 if (sdebug_every_nth < -1)
7095 sdebug_every_nth = -1;
7096 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7097 return true; /* ignore command causing timeout */
7098 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7099 scsi_medium_access_command(scp))
7100 return true; /* time out reads and writes */
7102 return false;
7105 /* Response to TUR or media access command when device stopped */
7106 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7108 int stopped_state;
7109 u64 diff_ns = 0;
7110 ktime_t now_ts = ktime_get_boottime();
7111 struct scsi_device *sdp = scp->device;
7113 stopped_state = atomic_read(&devip->stopped);
7114 if (stopped_state == 2) {
7115 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7116 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7117 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7118 /* tur_ms_to_ready timer extinguished */
7119 atomic_set(&devip->stopped, 0);
7120 return 0;
7123 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7124 if (sdebug_verbose)
7125 sdev_printk(KERN_INFO, sdp,
7126 "%s: Not ready: in process of becoming ready\n", my_name);
7127 if (scp->cmnd[0] == TEST_UNIT_READY) {
7128 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7130 if (diff_ns <= tur_nanosecs_to_ready)
7131 diff_ns = tur_nanosecs_to_ready - diff_ns;
7132 else
7133 diff_ns = tur_nanosecs_to_ready;
7134 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7135 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7136 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7137 diff_ns);
7138 return check_condition_result;
7141 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7142 if (sdebug_verbose)
7143 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7144 my_name);
7145 return check_condition_result;
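/*
 * Illustrative aside: the sense INFORMATION field set above carries the
 * milliseconds remaining until ready (20-061r2). A hypothetical helper
 * mirroring the diff_ns arithmetic in resp_not_ready():
 */
static u64 sdeb_ms_until_ready(u64 elapsed_ns, int tur_ms_to_ready)
{
	u64 total_ns = (u64)tur_ms_to_ready * 1000000;
	u64 remain_ns = (elapsed_ns <= total_ns) ? total_ns - elapsed_ns
						 : total_ns;

	do_div(remain_ns, 1000000);	/* nanoseconds -> milliseconds */
	return remain_ns;
}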
7148 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7149 struct scsi_cmnd *scp)
7152 struct scsi_device *sdp = scp->device;
7153 const struct opcode_info_t *oip;
7154 const struct opcode_info_t *r_oip;
7155 struct sdebug_dev_info *devip;
7156 u8 *cmd = scp->cmnd;
7157 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7158 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7167 scsi_set_resid(scp, 0);
7168 if (sdebug_statistics) {
7169 atomic_inc(&sdebug_cmnd_count);
7170 inject_now = inject_on_this_cmd();
7171 } else {
7172 inject_now = false;
7174 if (unlikely(sdebug_verbose &&
7175 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7179 len = scp->cmd_len;
7180 sb = (int)sizeof(b);
7181 if (len > 32)
7182 strcpy(b, "too long, over 32 bytes");
7183 else {
7184 for (k = 0, n = 0; k < len && n < sb; ++k)
7185 n += scnprintf(b + n, sb - n, "%02x ",
7186 (u32)cmd[k]);
7188 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7189 blk_mq_unique_tag(scp->request), b);
7191 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7192 return SCSI_MLQUEUE_HOST_BUSY;
7193 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7194 if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
7195 goto err_out;
7197 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7198 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7199 devip = (struct sdebug_dev_info *)sdp->hostdata;
7200 if (unlikely(!devip)) {
7201 devip = find_build_dev_info(sdp);
7202 if (NULL == devip)
7203 goto err_out;
7205 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7206 atomic_set(&sdeb_inject_pending, 1);
7208 na = oip->num_attached;
7209 r_pfp = oip->pfp;
7210 if (na) { /* multiple commands with this opcode */
7211 r_oip = oip;
7212 if (FF_SA & r_oip->flags) {
7213 if (F_SA_LOW & oip->flags)
7214 sa = 0x1f & cmd[1];
7215 else
7216 sa = get_unaligned_be16(cmd + 8);
7217 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7218 if (opcode == oip->opcode && sa == oip->sa)
7219 break;
7221 } else { /* since no service action only check opcode */
7222 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7223 if (opcode == oip->opcode)
7224 break;
7226 }
7227 if (k > na) {
7228 if (F_SA_LOW & r_oip->flags)
7229 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7230 else if (F_SA_HIGH & r_oip->flags)
7231 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7232 else
7233 mk_sense_invalid_opcode(scp);
7234 goto check_cond;
7236 } /* else (when na==0) we assume the oip is a match */
7237 flags = oip->flags;
7238 if (unlikely(F_INV_OP & flags)) {
7239 mk_sense_invalid_opcode(scp);
7240 goto check_cond;
7242 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7243 if (sdebug_verbose)
7244 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7245 my_name, opcode, " supported for wlun");
7246 mk_sense_invalid_opcode(scp);
7247 goto check_cond;
7249 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7250 u8 rem;
7251 int j;
7253 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7254 rem = ~oip->len_mask[k] & cmd[k];
7255 if (rem) {
7256 for (j = 7; j >= 0; --j, rem <<= 1) {
7257 if (0x80 & rem)
7258 break;
7260 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7261 goto check_cond;
7265 if (unlikely(!(F_SKIP_UA & flags) &&
7266 find_first_bit(devip->uas_bm,
7267 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7268 errsts = make_ua(scp, devip);
7269 if (errsts)
7270 goto check_cond;
7272 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7273 atomic_read(&devip->stopped))) {
7274 errsts = resp_not_ready(scp, devip);
7275 if (errsts)
7276 goto fini;
7278 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7279 goto fini;
7280 if (unlikely(sdebug_every_nth)) {
7281 if (fake_timeout(scp))
7282 return 0; /* ignore command: make trouble */
7284 if (likely(oip->pfp))
7285 pfp = oip->pfp; /* calls a resp_* function */
7286 else
7287 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7289 fini:
7290 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7291 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7292 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7293 sdebug_ndelay > 10000)) {
7295 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7296 * for Start Stop Unit (SSU) want at least 1 second delay and
7297 * if sdebug_jdelay>1 want a long delay of that many seconds.
7298 * For Synchronize Cache want 1/20 of SSU's delay.
7300 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7301 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7303 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
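/*
 * Worked example (illustrative): with HZ=250, USER_HZ=100, jdelay=2 and
 * a Synchronize Cache command (denom=20):
 * mult_frac(100 * 2, 250, 20 * 100) = 50000 / 2000 = 25 jiffies,
 * i.e. 100 ms, one twentieth of the 2 second SSU delay.
 */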
7304 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7305 } else
7306 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7307 sdebug_ndelay);
7308 check_cond:
7309 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7310 err_out:
7311 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7314 static struct scsi_host_template sdebug_driver_template = {
7315 .show_info = scsi_debug_show_info,
7316 .write_info = scsi_debug_write_info,
7317 .proc_name = sdebug_proc_name,
7318 .name = "SCSI DEBUG",
7319 .info = scsi_debug_info,
7320 .slave_alloc = scsi_debug_slave_alloc,
7321 .slave_configure = scsi_debug_slave_configure,
7322 .slave_destroy = scsi_debug_slave_destroy,
7323 .ioctl = scsi_debug_ioctl,
7324 .queuecommand = scsi_debug_queuecommand,
7325 .change_queue_depth = sdebug_change_qdepth,
7326 .eh_abort_handler = scsi_debug_abort,
7327 .eh_device_reset_handler = scsi_debug_device_reset,
7328 .eh_target_reset_handler = scsi_debug_target_reset,
7329 .eh_bus_reset_handler = scsi_debug_bus_reset,
7330 .eh_host_reset_handler = scsi_debug_host_reset,
7331 .can_queue = SDEBUG_CANQUEUE,
7333 .sg_tablesize = SG_MAX_SEGMENTS,
7334 .cmd_per_lun = DEF_CMD_PER_LUN,
7336 .max_segment_size = -1U,
7337 .module = THIS_MODULE,
7338 .track_queue_depth = 1,
7341 static int sdebug_driver_probe(struct device *dev)
7344 struct sdebug_host_info *sdbg_host;
7345 struct Scsi_Host *hpnt;
7348 sdbg_host = to_sdebug_host(dev);
7350 if (sdebug_host_max_queue)
7351 sdebug_driver_template.can_queue = sdebug_host_max_queue;
7352 else
7353 sdebug_driver_template.can_queue = sdebug_max_queue;
7354 if (!sdebug_clustering)
7355 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7357 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7358 if (NULL == hpnt) {
7359 pr_err("scsi_host_alloc failed\n");
7360 error = -ENODEV;
7361 return error;
7363 if (submit_queues > nr_cpu_ids) {
7364 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7365 my_name, submit_queues, nr_cpu_ids);
7366 submit_queues = nr_cpu_ids;
7369 * Decide whether to tell scsi subsystem that we want mq. The
7370 * following should give the same answer for each host. If the host
7371 * has a limit of hostwide max commands, then do not set.
7373 if (!sdebug_host_max_queue)
7374 hpnt->nr_hw_queues = submit_queues;
7376 sdbg_host->shost = hpnt;
7377 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7378 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7379 hpnt->max_id = sdebug_num_tgts + 1;
7380 else
7381 hpnt->max_id = sdebug_num_tgts;
7382 /* = sdebug_max_luns; */
7383 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7385 hprot = 0;
7387 switch (sdebug_dif) {
7389 case T10_PI_TYPE1_PROTECTION:
7390 hprot = SHOST_DIF_TYPE1_PROTECTION;
7391 if (sdebug_dix)
7392 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7393 break;
7395 case T10_PI_TYPE2_PROTECTION:
7396 hprot = SHOST_DIF_TYPE2_PROTECTION;
7397 if (sdebug_dix)
7398 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7399 break;
7401 case T10_PI_TYPE3_PROTECTION:
7402 hprot = SHOST_DIF_TYPE3_PROTECTION;
7403 if (sdebug_dix)
7404 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7405 break;
7407 default:
7408 if (sdebug_dix)
7409 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7413 scsi_host_set_prot(hpnt, hprot);
7415 if (have_dif_prot || sdebug_dix)
7416 pr_info("host protection%s%s%s%s%s%s%s\n",
7417 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7418 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7419 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7420 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7421 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7422 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7423 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7425 if (sdebug_guard == 1)
7426 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7427 else
7428 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7430 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7431 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7432 if (sdebug_every_nth) /* need stats counters for every_nth */
7433 sdebug_statistics = true;
7434 error = scsi_add_host(hpnt, &sdbg_host->dev);
7435 if (error) {
7436 pr_err("scsi_add_host failed\n");
7437 error = -ENODEV;
7438 scsi_host_put(hpnt);
7439 } else
7440 scsi_scan_host(hpnt);
7442 return error;
7446 static int sdebug_driver_remove(struct device *dev)
7448 struct sdebug_host_info *sdbg_host;
7449 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7451 sdbg_host = to_sdebug_host(dev);
7453 if (!sdbg_host) {
7454 pr_err("Unable to locate host info\n");
7455 return -ENODEV;
7458 scsi_remove_host(sdbg_host->shost);
7460 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7461 dev_list) {
7462 list_del(&sdbg_devinfo->dev_list);
7463 kfree(sdbg_devinfo->zstate);
7464 kfree(sdbg_devinfo);
7467 scsi_host_put(sdbg_host->shost);
7468 return 0;
7471 static int pseudo_lld_bus_match(struct device *dev,
7472 struct device_driver *dev_driver)
7474 return 1;
7477 static struct bus_type pseudo_lld_bus = {
7478 .name = "pseudo",
7479 .match = pseudo_lld_bus_match,
7480 .probe = sdebug_driver_probe,
7481 .remove = sdebug_driver_remove,
7482 .drv_groups = sdebug_drv_groups,