1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
10 * Copyright (C) 2001 - 2020 Douglas Gilbert
12 * For documentation see http://sg.danny.cz/sg/sdebug26.html
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
60 #include "scsi_logging.h"
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0189" /* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200421";
66 #define MY_NAME "scsi_debug"
68 /* Additional Sense Code (ASC) */
/*
 * ASC (and a few ASCQ) byte values placed in the sense data of CHECK
 * CONDITION responses built by this driver.  Despite the section header,
 * names ending in _ASCQ are Additional Sense Code Qualifiers that pair
 * with one of the ASC values (e.g. MICROCODE_CHANGED_ASCQ goes with
 * TARGET_CHANGED_ASC, per the inline comment below).  Values follow the
 * T10 SPC ASC/ASCQ assignments implied by each macro name.
 */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
106 /* Default values for driver parameters */
/*
 * DEF_* values seed the sdebug_* module-state variables declared later in
 * this file (e.g. sdebug_jdelay = DEF_JDELAY); they are presumably also the
 * defaults advertised for the corresponding module parameters -- confirm
 * against the module_param declarations further down the file.
 */
107 #define DEF_NUM_HOST 1
108 #define DEF_NUM_TGTS 1
109 #define DEF_MAX_LUNS 1
110 /* With these defaults, this driver will make 1 host with 1 target
111 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT 0
117 #define DEF_DEV_SIZE_MB 8
118 #define DEF_ZBC_DEV_SIZE_MB 128
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE 0
123 #define DEF_EVERY_NTH 0
124 #define DEF_FAKE_RW 0
126 #define DEF_HOST_LOCK 0
129 #define DEF_LBPWS10 0
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0 0
134 #define DEF_NUM_PARTS 0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB 0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_UUID_CTL 0
155 #define JDELAY_OVERRIDDEN -9999
157 /* Default parameters for ZBC drives */
158 #define DEF_ZBC_ZONE_SIZE_MB 128
159 #define DEF_ZBC_MAX_OPEN_ZONES 8
161 #define SDEBUG_LUN_0_VAL 0
163 /* bit mask values for sdebug_opts */
/*
 * OR-able flags for the sdebug_opts module state (initialized from
 * DEF_OPTS below).  The *_NOISE flags control logging verbosity; most of
 * the rest select an error/condition to inject, gated by every_nth as
 * described in the following comment block.  TSF here appears to mean
 * "task set full" (see the sdebug_a_tsf counter's comment) -- confirm.
 */
164 #define SDEBUG_OPT_NOISE 1
165 #define SDEBUG_OPT_MEDIUM_ERR 2
166 #define SDEBUG_OPT_TIMEOUT 4
167 #define SDEBUG_OPT_RECOVERED_ERR 8
168 #define SDEBUG_OPT_TRANSPORT_ERR 16
169 #define SDEBUG_OPT_DIF_ERR 32
170 #define SDEBUG_OPT_DIX_ERR 64
171 #define SDEBUG_OPT_MAC_TIMEOUT 128
172 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
173 #define SDEBUG_OPT_Q_NOISE 0x200
174 #define SDEBUG_OPT_ALL_TSF 0x400
175 #define SDEBUG_OPT_RARE_TSF 0x800
176 #define SDEBUG_OPT_N_WCE 0x1000
177 #define SDEBUG_OPT_RESET_NOISE 0x2000
178 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
179 #define SDEBUG_OPT_HOST_BUSY 0x8000
180 #define SDEBUG_OPT_CMD_ABORT 0x10000
/* convenience masks grouping the logging flags and the injection flags */
181 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
182 SDEBUG_OPT_RESET_NOISE)
183 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
184 SDEBUG_OPT_TRANSPORT_ERR | \
185 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
186 SDEBUG_OPT_SHORT_TRANSFER | \
187 SDEBUG_OPT_HOST_BUSY | \
188 SDEBUG_OPT_CMD_ABORT)
189 /* When "every_nth" > 0 then modulo "every_nth" commands:
190 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
191 * - a RECOVERED_ERROR is simulated on successful read and write
192 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
193 * - a TRANSPORT_ERROR is simulated on successful read and write
194 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
195 * - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
198 * When "every_nth" < 0 then after "- every_nth" commands the selected
199 * error will be injected. The error will be injected on every subsequent
200 * command until some other action occurs; for example, the user writing
201 * a new value (other than -1 or 1) to every_nth:
202 * echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
205 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
206 * priority order. In the subset implemented here lower numbers have higher
207 * priority. The UA numbers should be a sequence starting from 0 with
208 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
/* These numbers presumably index the per-device uas_bm bitmap (see struct
 * sdebug_dev_info); each pending UA maps onto one of the UA_*/MODE_CHANGED/
 * CAPACITY_CHANGED/MICROCODE_CHANGED ASC/ASCQ values defined earlier --
 * confirm against the UA dispatch code. */
209 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
210 #define SDEBUG_UA_BUS_RESET 1
211 #define SDEBUG_UA_MODE_CHANGED 2
212 #define SDEBUG_UA_CAPACITY_CHANGED 3
213 #define SDEBUG_UA_LUNS_CHANGED 4
214 #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
215 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
216 #define SDEBUG_NUM_UAS 7
218 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
219 * sector on read commands: */
220 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
221 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
223 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
224 * or "peripheral device" addressing (value 0) */
225 #define SAM2_LUN_ADDRESS_METHOD 0
227 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
228 * (for response) per submit queue at one time. Can be reduced by max_queue
229 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
230 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
231 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
232 * but cannot exceed SDEBUG_CANQUEUE .
234 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
235 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
236 #define DEF_CMD_PER_LUN 255
240 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
242 #define F_RL_WLUN_OK 0x10
243 #define F_SKIP_UA 0x20
244 #define F_DELAY_OVERR 0x40
245 #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
246 #define F_SA_HIGH 0x100 /* as used by variable length cdbs */
247 #define F_INV_OP 0x200
248 #define F_FAKE_RW 0x400
249 #define F_M_ACCESS 0x800 /* media access */
250 #define F_SSU_DELAY 0x1000
251 #define F_SYNC_DELAY 0x2000
253 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
254 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
255 #define FF_SA (F_SA_HIGH | F_SA_LOW)
256 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
258 #define SDEBUG_MAX_PARTS 4
260 #define SDEBUG_MAX_CMD_LEN 32
262 #define SDEB_XA_NOT_IN_USE XA_MARK_1
264 /* enumeration names taken from table 26, zbcr05 */
266 ZBC_NOT_WRITE_POINTER = 0x0,
268 ZC2_IMPLICIT_OPEN = 0x2,
269 ZC3_EXPLICIT_OPEN = 0x3,
276 struct sdeb_zone_state { /* ZBC: per zone state */
277 enum sdebug_z_cond z_cond;
283 struct sdebug_dev_info {
284 struct list_head dev_list;
285 unsigned int channel;
289 struct sdebug_host_info *sdbg_host;
290 unsigned long uas_bm[1];
295 /* For ZBC devices */
297 unsigned int zsize_shift;
298 unsigned int nr_zones;
299 unsigned int nr_imp_open;
300 unsigned int nr_exp_open;
301 unsigned int nr_closed;
302 unsigned int max_open;
303 struct sdeb_zone_state *zstate;
306 struct sdebug_host_info {
307 struct list_head host_list;
308 int si_idx; /* sdeb_store_info (per host) xarray index */
309 struct Scsi_Host *shost;
311 struct list_head dev_info_list;
314 /* There is an xarray of pointers to this struct's objects, one per host */
315 struct sdeb_store_info {
316 rwlock_t macc_lck; /* for atomic media access on this store */
317 u8 *storep; /* user data storage (ram) */
318 struct t10_pi_tuple *dif_storep; /* protection info */
319 void *map_storep; /* provisioning map */
322 #define to_sdebug_host(d) \
323 container_of(d, struct sdebug_host_info, dev)
325 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
328 struct sdebug_defer {
330 struct execute_work ew;
331 int sqa_idx; /* index of sdebug_queue array */
332 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
336 bool aborted; /* true when blk_abort_request() already called */
337 enum sdeb_defer_type defer_t;
340 struct sdebug_queued_cmd {
341 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
342 * instance indicates this slot is in use.
344 struct sdebug_defer *sd_dp;
345 struct scsi_cmnd *a_cmnd;
346 unsigned int inj_recovered:1;
347 unsigned int inj_transport:1;
348 unsigned int inj_dif:1;
349 unsigned int inj_dix:1;
350 unsigned int inj_short:1;
351 unsigned int inj_host_busy:1;
352 unsigned int inj_cmd_abort:1;
355 struct sdebug_queue {
356 struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
357 unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
359 atomic_t blocked; /* to temporarily stop more being queued */
/* Global statistics counters; atomic_t because they are presumably bumped
 * from per-queue submission/completion contexts concurrently -- confirm. */
362 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
363 static atomic_t sdebug_completions; /* count of deferred completions */
364 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
365 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
367 struct opcode_info_t {
368 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
369 /* for terminating element */
370 u8 opcode; /* if num_attached > 0, preferred */
371 u16 sa; /* service action */
372 u32 flags; /* OR-ed set of SDEB_F_* */
373 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
374 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
375 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
376 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
379 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
380 enum sdeb_opcode_index {
381 SDEB_I_INVALID_OPCODE = 0,
383 SDEB_I_REPORT_LUNS = 2,
384 SDEB_I_REQUEST_SENSE = 3,
385 SDEB_I_TEST_UNIT_READY = 4,
386 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
387 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
388 SDEB_I_LOG_SENSE = 7,
389 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
390 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
391 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
392 SDEB_I_START_STOP = 11,
393 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
394 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
395 SDEB_I_MAINT_IN = 14,
396 SDEB_I_MAINT_OUT = 15,
397 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
398 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
399 SDEB_I_RESERVE = 18, /* 6, 10 */
400 SDEB_I_RELEASE = 19, /* 6, 10 */
401 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
402 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
403 SDEB_I_ATA_PT = 22, /* 12, 16 */
404 SDEB_I_SEND_DIAG = 23,
406 SDEB_I_WRITE_BUFFER = 25,
407 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
408 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
409 SDEB_I_COMP_WRITE = 28,
410 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
411 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
412 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
413 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
417 static const unsigned char opcode_ind_arr[256] = {
418 /* 0x0; 0x0->0x1f: 6 byte cdbs */
419 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
421 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
422 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
424 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
425 SDEB_I_ALLOW_REMOVAL, 0,
426 /* 0x20; 0x20->0x3f: 10 byte cdbs */
427 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
428 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
429 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
430 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
431 /* 0x40; 0x40->0x5f: 10 byte cdbs */
432 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
433 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
434 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
436 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
437 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
438 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
439 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
440 0, SDEB_I_VARIABLE_LEN,
441 /* 0x80; 0x80->0x9f: 16 byte cdbs */
442 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
443 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
444 0, 0, 0, SDEB_I_VERIFY,
445 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
446 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
447 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
448 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
449 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
450 SDEB_I_MAINT_OUT, 0, 0, 0,
451 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
452 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
453 0, 0, 0, 0, 0, 0, 0, 0,
454 0, 0, 0, 0, 0, 0, 0, 0,
455 /* 0xc0; 0xc0->0xff: vendor specific */
456 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
457 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
458 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
459 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
463 * The following "response" functions return the SCSI mid-level's 4 byte
464 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
465 * command completion, they can mask their return value with
466 * SDEG_RES_IMMED_MASK .
468 #define SDEG_RES_IMMED_MASK 0x40000000
/*
 * Forward declarations of the per-opcode response handlers referenced by
 * the opcode_info_t tables below.  Each takes the command being serviced
 * plus the addressed device's state, and returns the SCSI mid-level
 * result int (optionally OR-ed with SDEG_RES_IMMED_MASK, per the comment
 * preceding that define).
 */
470 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
/* ZBC (zoned device) command handlers */
494 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
/* Forward declarations: host and backing-store lifecycle helpers,
 * defined later in this translation unit. */
500 static int sdebug_do_add_host(bool mk_new_store);
501 static int sdebug_add_host_helper(int per_host_idx);
502 static void sdebug_do_remove_host(bool the_end);
503 static int sdebug_add_store(void);
504 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
505 static void sdebug_erase_all_stores(bool apart_from_first);
508 * The following are overflow arrays for cdbs that "hit" the same index in
509 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
510 * should be placed in opcode_info_arr[], the others should be placed here.
512 static const struct opcode_info_t msense_iarr[] = {
513 {0, 0x1a, 0, F_D_IN, NULL, NULL,
514 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
517 static const struct opcode_info_t mselect_iarr[] = {
518 {0, 0x15, 0, F_D_OUT, NULL, NULL,
519 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
522 static const struct opcode_info_t read_iarr[] = {
523 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
524 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
526 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
527 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
528 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
529 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
533 static const struct opcode_info_t write_iarr[] = {
534 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
535 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
537 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
538 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
540 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
541 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
542 0xbf, 0xc7, 0, 0, 0, 0} },
545 static const struct opcode_info_t verify_iarr[] = {
546 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
547 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
551 static const struct opcode_info_t sa_in_16_iarr[] = {
552 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
553 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
554 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
557 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
558 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
559 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
560 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
561 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
562 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
563 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
566 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
567 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
568 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
569 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
570 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
571 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
572 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
575 static const struct opcode_info_t write_same_iarr[] = {
576 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
577 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
578 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
581 static const struct opcode_info_t reserve_iarr[] = {
582 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
583 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
586 static const struct opcode_info_t release_iarr[] = {
587 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
588 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
591 static const struct opcode_info_t sync_cache_iarr[] = {
592 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
593 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
594 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
597 static const struct opcode_info_t pre_fetch_iarr[] = {
598 {0, 0x90, 0, F_SYNC_DELAY | F_M_ACCESS, resp_pre_fetch, NULL,
599 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
600 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
603 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
604 {0, 0x94, 0x1, F_SA_LOW, resp_close_zone, NULL,
605 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
607 {0, 0x94, 0x2, F_SA_LOW, resp_finish_zone, NULL,
608 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
610 {0, 0x94, 0x4, F_SA_LOW, resp_rwp_zone, NULL,
611 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
615 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
616 {0, 0x95, 0x6, F_SA_LOW | F_D_IN, NULL, NULL,
617 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
618 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
622 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
623 * plus the terminating elements for logic that scans this table such as
624 * REPORT SUPPORTED OPERATION CODES. */
625 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
627 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
628 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
629 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
630 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
631 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
632 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
633 0, 0} }, /* REPORT LUNS */
634 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
635 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
636 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
637 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
640 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
641 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
642 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
643 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
644 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
646 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
648 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
649 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
651 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
652 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
653 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
655 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
656 resp_write_dt0, write_iarr, /* WRITE(16) */
657 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
658 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
659 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
660 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
661 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
662 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
663 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
664 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
665 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
666 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
667 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
668 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
669 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
670 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
671 0xff, 0, 0xc7, 0, 0, 0, 0} },
673 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
674 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
675 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
676 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
677 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
678 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
679 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
680 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
681 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
683 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
684 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
685 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
687 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
688 NULL, release_iarr, /* RELEASE(10) <no response function> */
689 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
692 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
693 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
694 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
695 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
696 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
697 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
698 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
699 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
700 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
701 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
703 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
704 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
705 0, 0, 0, 0} }, /* WRITE_BUFFER */
706 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
707 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
708 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
710 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
711 resp_sync_cache, sync_cache_iarr,
712 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
713 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
714 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
715 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
716 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
717 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | F_M_ACCESS,
718 resp_pre_fetch, pre_fetch_iarr,
719 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
720 0, 0, 0, 0} }, /* PRE-FETCH (10) */
723 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW,
724 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
725 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
726 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
727 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_D_IN,
728 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
729 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
730 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
732 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
733 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/*
 * Module state: the live values of the driver parameters (initialized
 * from the DEF_* defaults above) plus global bookkeeping shared by all
 * simulated hosts.
 */
736 static int sdebug_num_hosts; /* count of hosts currently instantiated */
737 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
738 static int sdebug_ato = DEF_ATO;
739 static int sdebug_cdb_len = DEF_CDB_LEN;
740 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
741 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
742 static int sdebug_dif = DEF_DIF;
743 static int sdebug_dix = DEF_DIX;
744 static int sdebug_dsense = DEF_D_SENSE; /* descriptor-format sense when set */
745 static int sdebug_every_nth = DEF_EVERY_NTH;
746 static int sdebug_fake_rw = DEF_FAKE_RW;
747 static unsigned int sdebug_guard = DEF_GUARD;
748 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
749 static int sdebug_max_luns = DEF_MAX_LUNS;
750 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
751 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
752 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
753 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
754 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
755 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
756 static int sdebug_no_uld; /* presumably suppresses upper-level driver (sd) attach -- confirm */
757 static int sdebug_num_parts = DEF_NUM_PARTS;
758 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
759 static int sdebug_opt_blks = DEF_OPT_BLKS;
760 static int sdebug_opts = DEF_OPTS; /* OR-ed SDEBUG_OPT_* flags, see above */
761 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
762 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
763 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
764 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
765 static int sdebug_sector_size = DEF_SECTOR_SIZE;
766 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
767 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* logical block provisioning (thin provisioning) controls */
768 static unsigned int sdebug_lbpu = DEF_LBPU;
769 static unsigned int sdebug_lbpws = DEF_LBPWS;
770 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
771 static unsigned int sdebug_lbprz = DEF_LBPRZ;
772 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
773 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
774 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
775 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
776 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
777 static int sdebug_uuid_ctl = DEF_UUID_CTL;
778 static bool sdebug_random = DEF_RANDOM;
779 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
780 static bool sdebug_removable = DEF_REMOVABLE;
781 static bool sdebug_clustering; /* NOTE(review): meaning not evident here -- see host template setup */
782 static bool sdebug_host_lock = DEF_HOST_LOCK;
783 static bool sdebug_strict = DEF_STRICT;
784 static bool sdebug_any_injecting_opt; /* presumably set when any SDEBUG_OPT_ALL_INJECTING bit is on -- confirm */
785 static bool sdebug_verbose; /* extra logging; presumably derived from sdebug_opts noise bits -- confirm */
786 static bool have_dif_prot; /* T10 PI (DIF) protection enabled -- confirm against dif/dix setup */
787 static bool write_since_sync; /* tracks writes since last SYNCHRONIZE CACHE -- confirm */
788 static bool sdebug_statistics = DEF_STATISTICS;
789 static bool sdebug_wp; /* presumably write-protect toggle -- confirm */
790 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
791 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
792 static char *sdeb_zbc_model_s; /* string form of the zbc model module param */
794 static unsigned int sdebug_store_sectors;
795 static sector_t sdebug_capacity; /* in sectors */
797 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
798 may still need them */
799 static int sdebug_heads; /* heads per disk */
800 static int sdebug_cylinders_per; /* cylinders per surface */
801 static int sdebug_sectors_per; /* sectors per cylinder */
/* list of all simulated hosts, protected by sdebug_host_list_lock */
803 static LIST_HEAD(sdebug_host_list);
804 static DEFINE_SPINLOCK(sdebug_host_list_lock);
/* xarray of sdeb_store_info pointers (backing stores), one per host when
 * per_host_store is set; indexed by the si_idx stored in each host */
806 static struct xarray per_store_arr;
807 static struct xarray *per_store_ap = &per_store_arr;
808 static int sdeb_first_idx = -1; /* invalid index ==> none created */
809 static int sdeb_most_recent_idx = -1;
810 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
/* reset/error statistics; presumably reported via proc or sysfs -- confirm */
812 static unsigned long map_size;
813 static int num_aborts;
814 static int num_dev_resets;
815 static int num_target_resets;
816 static int num_bus_resets;
817 static int num_host_resets;
818 static int dix_writes;
819 static int dix_reads;
820 static int dif_errors;
822 /* ZBC global data */
823 static bool sdeb_zbc_in_use; /* true when ptype=TYPE_ZBC [0x14] */
824 static const int zbc_zone_size_mb;
825 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
827 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
828 static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
/* media-access locks; ramdisk_lck_a presumably aliases the two rwlocks
 * below -- confirm where it is initialized */
830 static DEFINE_RWLOCK(atomic_rw);
831 static DEFINE_RWLOCK(atomic_rw2);
833 static rwlock_t *ramdisk_lck_a[2];
835 static char sdebug_proc_name[] = MY_NAME;
836 static const char *my_name = MY_NAME;
838 static struct bus_type pseudo_lld_bus;
840 static struct device_driver sdebug_driverfs_driver = {
841 .name = sdebug_proc_name,
842 .bus = &pseudo_lld_bus,
845 static const int check_condition_result =
846 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
848 static const int illegal_condition_result =
849 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
851 static const int device_qfull_result =
852 (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
854 static const int condition_met_result = SAM_STAT_CONDITION_MET;
857 /* Only do the extra work involved in logical block provisioning if one or
858 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
859 * real reads and writes (i.e. not skipping them for speed).
861 static inline bool scsi_debug_lbp(void)
863 return 0 == sdebug_fake_rw &&
864 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
/*
 * Map an LBA to its byte address inside the backing ram store. do_div()
 * reduces the LBA modulo sdebug_store_sectors, so a configured capacity
 * larger than the store wraps around. When @sip has no store attached,
 * fall back to the store at xarray index 0.
 */
867 static void *lba2fake_store(struct sdeb_store_info *sip,
868 unsigned long long lba)
870 struct sdeb_store_info *lsip = sip;
872 lba = do_div(lba, sdebug_store_sectors);
873 if (!sip || !sip->storep) {
875 lsip = xa_load(per_store_ap, 0); /* should never be NULL */
877 return lsip->storep + lba * sdebug_sector_size;
/*
 * Return the T10 protection-information tuple for the given sector in
 * sip's DIF store. sector_div() reduces the sector modulo the store size
 * (same wrap-around behavior as lba2fake_store() above).
 */
880 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
883 sector = sector_div(sector, sdebug_store_sectors);
885 return sip->dif_storep + sector;
/*
 * Push sdebug_num_tgts into the max_id of every simulated host (walked
 * under sdebug_host_list_lock) and set max_lun high enough that the
 * REPORT LUNS well-known LUN is addressable.
 */
888 static void sdebug_max_tgts_luns(void)
890 struct sdebug_host_info *sdbg_host;
891 struct Scsi_Host *hpnt;
893 spin_lock(&sdebug_host_list_lock);
894 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
895 hpnt = sdbg_host->shost;
896 if ((hpnt->this_id >= 0) &&
897 (sdebug_num_tgts > hpnt->this_id))
/* +1 so the host's own id does not eat a target slot */
898 hpnt->max_id = sdebug_num_tgts + 1;
900 hpnt->max_id = sdebug_num_tgts;
901 /* sdebug_max_luns; */
902 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
904 spin_unlock(&sdebug_host_list_lock);
907 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
909 /* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build ILLEGAL REQUEST sense data with INVALID FIELD IN CDB (or in
 * parameter list, per @c_d) plus a sense-key-specific field-pointer
 * descriptor locating the offending byte/bit. Format (descriptor vs
 * fixed) follows sdebug_dsense; logs when sdebug_verbose is set.
 */
910 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
911 enum sdeb_cmd_data c_d,
912 int in_byte, int in_bit)
914 unsigned char *sbuff;
918 sbuff = scp->sense_buffer;
920 sdev_printk(KERN_ERR, scp->device,
921 "%s: sense_buffer is NULL\n", __func__);
924 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
925 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
926 scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
927 memset(sks, 0, sizeof(sks));
/* low 3 bits of sks[0] carry the bit pointer of the invalid field */
933 sks[0] |= 0x7 & in_bit;
935 put_unaligned_be16(in_byte, sks + 1);
/* descriptor format: SKS bytes follow the descriptor header */
941 memcpy(sbuff + sl + 4, sks, 3);
/* fixed format: SKS field lives at bytes 15..17 */
943 memcpy(sbuff + 15, sks, 3);
945 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
946 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
947 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
/*
 * Fill the command's sense buffer with (key, asc, asq), in descriptor or
 * fixed format according to sdebug_dsense; logs the triple when
 * sdebug_verbose is set.
 */
950 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
952 unsigned char *sbuff;
954 sbuff = scp->sense_buffer;
956 sdev_printk(KERN_ERR, scp->device,
957 "%s: sense_buffer is NULL\n", __func__);
960 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
962 scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
965 sdev_printk(KERN_INFO, scp->device,
966 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
967 my_name, key, asc, asq);
970 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
972 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
/*
 * ioctl entry point: only logs the ioctl (when sdebug_verbose is set) and
 * defers to generic handling; returning -ENOTTY here would be more correct
 * but confuses fdisk (see comment at the end).
 */
975 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
978 if (sdebug_verbose) {
980 sdev_printk(KERN_INFO, dev,
981 "%s: BLKFLSBUF [0x1261]\n", __func__);
982 else if (0x5331 == cmd)
983 sdev_printk(KERN_INFO, dev,
984 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
987 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
991 /* return -ENOTTY; // correct return but upsets fdisk */
/*
 * Configure a scsi_device's preferred CDB sizes (READ/WRITE and MODE
 * SENSE/SELECT variants) from the sdebug_cdb_len module parameter.
 * An unexpected value is warned about and coerced back to 10.
 */
994 static void config_cdb_len(struct scsi_device *sdev)
996 switch (sdebug_cdb_len) {
997 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
998 sdev->use_10_for_rw = false;
999 sdev->use_16_for_rw = false;
1000 sdev->use_10_for_ms = false;
1002 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1003 sdev->use_10_for_rw = true;
1004 sdev->use_16_for_rw = false;
1005 sdev->use_10_for_ms = false;
1007 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1008 sdev->use_10_for_rw = true;
1009 sdev->use_16_for_rw = false;
1010 sdev->use_10_for_ms = true;
/* 16 byte RWs and 10 byte MODE SENSE/SELECT */
1013 sdev->use_10_for_rw = false;
1014 sdev->use_16_for_rw = true;
1015 sdev->use_10_for_ms = true;
1017 case 32: /* No knobs to suggest this so same as 16 for now */
1018 sdev->use_10_for_rw = false;
1019 sdev->use_16_for_rw = true;
1020 sdev->use_10_for_ms = true;
/* unexpected value: warn and fall back to 10 byte CDBs */
1023 pr_warn("unexpected cdb_len=%d, force to 10\n",
1025 sdev->use_10_for_rw = true;
1026 sdev->use_16_for_rw = false;
1027 sdev->use_10_for_ms = false;
1028 sdebug_cdb_len = 10;
1033 static void all_config_cdb_len(void)
1035 struct sdebug_host_info *sdbg_host;
1036 struct Scsi_Host *shost;
1037 struct scsi_device *sdev;
1039 spin_lock(&sdebug_host_list_lock);
1040 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1041 shost = sdbg_host->shost;
1042 shost_for_each_device(sdev, shost) {
1043 config_cdb_len(sdev);
1046 spin_unlock(&sdebug_host_list_lock);
1049 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1051 struct sdebug_host_info *sdhp;
1052 struct sdebug_dev_info *dp;
1054 spin_lock(&sdebug_host_list_lock);
1055 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1056 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1057 if ((devip->sdbg_host == dp->sdbg_host) &&
1058 (devip->target == dp->target))
1059 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1062 spin_unlock(&sdebug_host_list_lock);
/*
 * Report a pending unit attention (UA), if any, for this device. The
 * lowest pending bit in devip->uas_bm is converted into CHECK CONDITION
 * sense data on @scp and then cleared; check_condition_result is
 * returned in that case.
 */
1065 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1069 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1070 if (k != SDEBUG_NUM_UAS) {
1071 const char *cp = NULL;
1075 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1076 POWER_ON_RESET_ASCQ);
1078 cp = "power on reset";
1080 case SDEBUG_UA_BUS_RESET:
1081 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1086 case SDEBUG_UA_MODE_CHANGED:
1087 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1090 cp = "mode parameters changed";
1092 case SDEBUG_UA_CAPACITY_CHANGED:
1093 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1094 CAPACITY_CHANGED_ASCQ);
1096 cp = "capacity data changed";
1098 case SDEBUG_UA_MICROCODE_CHANGED:
1099 mk_sense_buffer(scp, UNIT_ATTENTION,
1101 MICROCODE_CHANGED_ASCQ);
1103 cp = "microcode has been changed";
1105 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1106 mk_sense_buffer(scp, UNIT_ATTENTION,
1108 MICROCODE_CHANGED_WO_RESET_ASCQ);
1110 cp = "microcode has been changed without reset";
1112 case SDEBUG_UA_LUNS_CHANGED:
1114 * SPC-3 behavior is to report a UNIT ATTENTION with
1115 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1116 * on the target, until a REPORT LUNS command is
1117 * received. SPC-4 behavior is to report it only once.
1118 * NOTE: sdebug_scsi_level does not use the same
1119 * values as struct scsi_device->scsi_level.
1121 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1122 clear_luns_changed_on_target(devip);
1123 mk_sense_buffer(scp, UNIT_ATTENTION,
1127 cp = "reported luns data has changed";
1130 pr_warn("unexpected unit attention code=%d\n", k);
/* UA is reported once: clear the bit now that sense is built */
1135 clear_bit(k, devip->uas_bm);
1137 sdev_printk(KERN_INFO, scp->device,
1138 "%s reports: Unit attention: %s\n",
1140 return check_condition_result;
1145 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
/*
 * Copy @arr into the command's scatter-gather list and set the residual
 * to the number of requested bytes that were not supplied.
 */
1146 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1150 struct scsi_data_buffer *sdb = &scp->sdb;
/* data-in is only valid for commands transferring from the device */
1154 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1155 return DID_ERROR << 16;
1157 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1159 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1164 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1165 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1166 * calls, not required to write in ascending offset order. Assumes resid
1167 * set to scsi_bufflen() prior to any calls.
1169 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1170 int arr_len, unsigned int off_dst)
1172 unsigned int act_len, n;
1173 struct scsi_data_buffer *sdb = &scp->sdb;
1174 off_t skip = off_dst;
/* offset beyond the data-in buffer: nothing to copy */
1176 if (sdb->length <= off_dst)
1178 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1179 return DID_ERROR << 16;
1181 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1182 arr, arr_len, skip);
1183 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1184 __func__, off_dst, scsi_bufflen(scp), act_len,
1185 scsi_get_resid(scp));
/* resid may only shrink across multiple partial fills, hence the min */
1186 n = scsi_bufflen(scp) - (off_dst + act_len);
1187 scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1191 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1192 * 'arr' or -1 if error.
1194 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
/* zero-length transfer: nothing to fetch */
1197 if (!scsi_bufflen(scp))
/* data-out requires a to-device transfer direction */
1199 if (scp->sc_data_direction != DMA_TO_DEVICE)
1202 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
/*
 * INQUIRY identity strings; array sizes leave room for a trailing NUL so
 * they can be overridden via module parameters.
 */
1206 static char sdebug_inq_vendor_id[9] = "Linux ";
1207 static char sdebug_inq_product_id[17] = "scsi_debug ";
1208 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1209 /* Use some locally assigned NAAs for SAS addresses. */
1210 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1211 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1212 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1214 /* Device identification VPD page. Returns number of bytes placed in arr */
/*
 * Emits a sequence of designation descriptors: T10 vendor id, logical
 * unit id (UUID or NAA-3 depending on sdebug_uuid_ctl), relative target
 * port, target port/port group/device NAA-3 ids and a SCSI name string.
 */
1215 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1216 int target_dev_id, int dev_id_num,
1217 const char *dev_id_str, int dev_id_str_len,
1218 const uuid_t *lu_name)
1223 port_a = target_dev_id + 1;
1224 /* T10 vendor identifier field format (faked) */
1225 arr[0] = 0x2; /* ASCII */
1228 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1229 memcpy(&arr[12], sdebug_inq_product_id, 16);
1230 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1231 num = 8 + 16 + dev_id_str_len;
1234 if (dev_id_num >= 0) {
1235 if (sdebug_uuid_ctl) {
1236 /* Locally assigned UUID */
1237 arr[num++] = 0x1; /* binary (not necessarily sas) */
1238 arr[num++] = 0xa; /* PIV=0, lu, naa */
1241 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1243 memcpy(arr + num, lu_name, 16);
1246 /* NAA-3, Logical unit identifier (binary) */
1247 arr[num++] = 0x1; /* binary (not necessarily sas) */
1248 arr[num++] = 0x3; /* PIV=0, lu, naa */
1251 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1254 /* Target relative port number */
1255 arr[num++] = 0x61; /* proto=sas, binary */
1256 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1257 arr[num++] = 0x0; /* reserved */
1258 arr[num++] = 0x4; /* length */
1259 arr[num++] = 0x0; /* reserved */
1260 arr[num++] = 0x0; /* reserved */
1262 arr[num++] = 0x1; /* relative port A */
1264 /* NAA-3, Target port identifier */
1265 arr[num++] = 0x61; /* proto=sas, binary */
1266 arr[num++] = 0x93; /* piv=1, target port, naa */
1269 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1271 /* NAA-3, Target port group identifier */
1272 arr[num++] = 0x61; /* proto=sas, binary */
1273 arr[num++] = 0x95; /* piv=1, target port group id */
1278 put_unaligned_be16(port_group_id, arr + num);
1280 /* NAA-3, Target device identifier */
1281 arr[num++] = 0x61; /* proto=sas, binary */
1282 arr[num++] = 0xa3; /* piv=1, target device, naa */
1285 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1287 /* SCSI name string: Target device identifier */
1288 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1289 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1292 memcpy(arr + num, "naa.32222220", 12);
1294 snprintf(b, sizeof(b), "%08X", target_dev_id);
1295 memcpy(arr + num, b, 8);
/* zero padding to a 4 byte boundary */
1297 memset(arr + num, 0, 4);
/* Canned payload for the Software interface identification VPD page. */
1302 static unsigned char vpd84_data[] = {
1303 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1304 0x22,0x22,0x22,0x0,0xbb,0x1,
1305 0x22,0x22,0x22,0x0,0xbb,0x2,
1308 /* Software interface identification VPD page */
1309 static int inquiry_vpd_84(unsigned char *arr)
1311 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1312 return sizeof(vpd84_data);
1315 /* Management network addresses VPD page */
/*
 * Emits two network-address descriptors (storage configuration URL and
 * logging URL), each NUL terminated and padded to a 4-byte multiple.
 */
1316 static int inquiry_vpd_85(unsigned char *arr)
1319 const char *na1 = "https://www.kernel.org/config";
1320 const char *na2 = "http://www.kernel.org/log";
1323 arr[num++] = 0x1; /* lu, storage config */
1324 arr[num++] = 0x0; /* reserved */
1329 plen = ((plen / 4) + 1) * 4;
1330 arr[num++] = plen; /* length, null terminated, padded */
1331 memcpy(arr + num, na1, olen);
1332 memset(arr + num + olen, 0, plen - olen);
1335 arr[num++] = 0x4; /* lu, logging */
1336 arr[num++] = 0x0; /* reserved */
1341 plen = ((plen / 4) + 1) * 4;
1342 arr[num++] = plen; /* length, null terminated, padded */
1343 memcpy(arr + num, na2, olen);
1344 memset(arr + num + olen, 0, plen - olen);
1350 /* SCSI ports VPD page */
/*
 * Reports two relative ports (A primary, B secondary), each with a NAA-3
 * SAS target port identifier derived from target_dev_id.
 */
1351 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1356 port_a = target_dev_id + 1;
1357 port_b = port_a + 1;
1358 arr[num++] = 0x0; /* reserved */
1359 arr[num++] = 0x0; /* reserved */
1361 arr[num++] = 0x1; /* relative port 1 (primary) */
1362 memset(arr + num, 0, 6);
1365 arr[num++] = 12; /* length tp descriptor */
1366 /* naa-5 target port identifier (A) */
1367 arr[num++] = 0x61; /* proto=sas, binary */
1368 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1369 arr[num++] = 0x0; /* reserved */
1370 arr[num++] = 0x8; /* length */
1371 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1373 arr[num++] = 0x0; /* reserved */
1374 arr[num++] = 0x0; /* reserved */
1376 arr[num++] = 0x2; /* relative port 2 (secondary) */
1377 memset(arr + num, 0, 6);
1380 arr[num++] = 12; /* length tp descriptor */
1381 /* naa-5 target port identifier (B) */
1382 arr[num++] = 0x61; /* proto=sas, binary */
1383 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1384 arr[num++] = 0x0; /* reserved */
1385 arr[num++] = 0x8; /* length */
1386 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
/* Canned ATA IDENTIFY-style payload for the ATA Information VPD page. */
1393 static unsigned char vpd89_data[] = {
1394 /* from 4th byte */ 0,0,0,0,
1395 'l','i','n','u','x',' ',' ',' ',
1396 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1398 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1400 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1401 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1402 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1403 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1405 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1407 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1409 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1410 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1411 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1412 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1413 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1414 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1415 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1416 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1417 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1418 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1419 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1420 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1421 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1422 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1423 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1424 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1425 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1426 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1427 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1428 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1437 /* ATA Information VPD page */
1438 static int inquiry_vpd_89(unsigned char *arr)
1440 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1441 return sizeof(vpd89_data);
/* Template for the Block limits VPD page; fields patched in below. */
1445 static unsigned char vpdb0_data[] = {
1446 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 /* Block limits VPD page (SBC-3) */
1453 static int inquiry_vpd_b0(unsigned char *arr)
1457 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1459 /* Optimal transfer length granularity */
1460 if (sdebug_opt_xferlen_exp != 0 &&
1461 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1462 gran = 1 << sdebug_opt_xferlen_exp;
1464 gran = 1 << sdebug_physblk_exp;
1465 put_unaligned_be16(gran, arr + 2);
1467 /* Maximum Transfer Length */
1468 if (sdebug_store_sectors > 0x400)
1469 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1471 /* Optimal Transfer Length */
1472 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
/* UNMAP-related fields below only apply when LBP is enabled */
1475 /* Maximum Unmap LBA Count */
1476 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1478 /* Maximum Unmap Block Descriptor Count */
1479 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1482 /* Unmap Granularity Alignment */
1483 if (sdebug_unmap_alignment) {
1484 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1485 arr[28] |= 0x80; /* UGAVALID */
1488 /* Optimal Unmap Granularity */
1489 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1491 /* Maximum WRITE SAME Length */
1492 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1494 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1496 return sizeof(vpdb0_data);
1499 /* Block device characteristics VPD page (SBC-3) */
/* Advertises a non-rotating (SSD-like) medium in a small form factor. */
1500 static int inquiry_vpd_b1(unsigned char *arr)
1502 memset(arr, 0, 0x3c);
1504 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1506 arr[3] = 5; /* less than 1.8" */
1511 /* Logical block provisioning VPD page (SBC-4) */
/* LBPRZ is advertised only when provisioning is actually in effect. */
1512 static int inquiry_vpd_b2(unsigned char *arr)
1514 memset(arr, 0, 0x4);
1515 arr[0] = 0; /* threshold exponent */
1522 if (sdebug_lbprz && scsi_debug_lbp())
1523 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1524 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1525 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1526 /* threshold_percentage=0 */
1530 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1531 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1533 memset(arr, 0, 0x3c);
1534 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1536 * Set Optimal number of open sequential write preferred zones and
1537 * Optimal number of non-sequentially written sequential write
1538 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1539 * fields set to zero, apart from Max. number of open swrz_s field.
1541 put_unaligned_be32(0xffffffff, &arr[4]);
1542 put_unaligned_be32(0xffffffff, &arr[8]);
/* max open zones: devip->max_open if set, else 'not reported' */
1543 if (devip->max_open)
1544 put_unaligned_be32(devip->max_open, &arr[12]);
1546 put_unaligned_be32(0xffffffff, &arr[12]);
1550 #define SDEBUG_LONG_INQ_SZ 96
1551 #define SDEBUG_MAX_INQ_ARR_SZ 584
/*
 * INQUIRY responder. Handles both the EVPD path (dispatching to the
 * inquiry_vpd_*() helpers above by page code) and the standard inquiry
 * data, including version descriptors per the simulated device type.
 * Rejects the obsolete CMDDT bit with INVALID FIELD IN CDB.
 */
1553 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1555 unsigned char pq_pdt;
1557 unsigned char *cmd = scp->cmnd;
1558 int alloc_len, n, ret;
1559 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1561 alloc_len = get_unaligned_be16(cmd + 3);
1562 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1564 return DID_REQUEUE << 16;
1565 is_disk = (sdebug_ptype == TYPE_DISK);
1566 is_zbc = (sdebug_ptype == TYPE_ZBC);
1567 is_disk_zbc = (is_disk || is_zbc);
1568 have_wlun = scsi_is_wlun(scp->device->lun);
1570 pq_pdt = TYPE_WLUN; /* present, wlun */
1571 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1572 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1574 pq_pdt = (sdebug_ptype & 0x1f);
1576 if (0x2 & cmd[1]) { /* CMDDT bit set */
1577 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1579 return check_condition_result;
1580 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1581 int lu_id_num, port_group_id, target_dev_id, len;
1583 int host_no = devip->sdbg_host->shost->host_no;
1585 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1586 (devip->channel & 0x7f);
1587 if (sdebug_vpd_use_hostno == 0)
1589 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1590 (devip->target * 1000) + devip->lun);
1591 target_dev_id = ((host_no + 1) * 2000) +
1592 (devip->target * 1000) - 3;
1593 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1594 if (0 == cmd[2]) { /* supported vital product data pages */
1595 arr[1] = cmd[2]; /*sanity */
1597 arr[n++] = 0x0; /* this page */
1598 arr[n++] = 0x80; /* unit serial number */
1599 arr[n++] = 0x83; /* device identification */
1600 arr[n++] = 0x84; /* software interface ident. */
1601 arr[n++] = 0x85; /* management network addresses */
1602 arr[n++] = 0x86; /* extended inquiry */
1603 arr[n++] = 0x87; /* mode page policy */
1604 arr[n++] = 0x88; /* SCSI ports */
1605 if (is_disk_zbc) { /* SBC or ZBC */
1606 arr[n++] = 0x89; /* ATA information */
1607 arr[n++] = 0xb0; /* Block limits */
1608 arr[n++] = 0xb1; /* Block characteristics */
1610 arr[n++] = 0xb2; /* LB Provisioning */
1612 arr[n++] = 0xb6; /* ZB dev. char. */
1614 arr[3] = n - 4; /* number of supported VPD pages */
1615 } else if (0x80 == cmd[2]) { /* unit serial number */
1616 arr[1] = cmd[2]; /*sanity */
1618 memcpy(&arr[4], lu_id_str, len);
1619 } else if (0x83 == cmd[2]) { /* device identification */
1620 arr[1] = cmd[2]; /*sanity */
1621 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1622 target_dev_id, lu_id_num,
1625 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1626 arr[1] = cmd[2]; /*sanity */
1627 arr[3] = inquiry_vpd_84(&arr[4]);
1628 } else if (0x85 == cmd[2]) { /* Management network addresses */
1629 arr[1] = cmd[2]; /*sanity */
1630 arr[3] = inquiry_vpd_85(&arr[4]);
1631 } else if (0x86 == cmd[2]) { /* extended inquiry */
1632 arr[1] = cmd[2]; /*sanity */
1633 arr[3] = 0x3c; /* number of following entries */
1634 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1635 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1636 else if (have_dif_prot)
1637 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1639 arr[4] = 0x0; /* no protection stuff */
1640 arr[5] = 0x7; /* head of q, ordered + simple q's */
1641 } else if (0x87 == cmd[2]) { /* mode page policy */
1642 arr[1] = cmd[2]; /*sanity */
1643 arr[3] = 0x8; /* number of following entries */
1644 arr[4] = 0x2; /* disconnect-reconnect mp */
1645 arr[6] = 0x80; /* mlus, shared */
1646 arr[8] = 0x18; /* protocol specific lu */
1647 arr[10] = 0x82; /* mlus, per initiator port */
1648 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1649 arr[1] = cmd[2]; /*sanity */
1650 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1651 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1652 arr[1] = cmd[2]; /*sanity */
1653 n = inquiry_vpd_89(&arr[4]);
1654 put_unaligned_be16(n, arr + 2);
1655 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1656 arr[1] = cmd[2]; /*sanity */
1657 arr[3] = inquiry_vpd_b0(&arr[4]);
1658 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1659 arr[1] = cmd[2]; /*sanity */
1660 arr[3] = inquiry_vpd_b1(&arr[4]);
1661 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1662 arr[1] = cmd[2]; /*sanity */
1663 arr[3] = inquiry_vpd_b2(&arr[4]);
1664 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1665 arr[1] = cmd[2]; /*sanity */
1666 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
/* unsupported VPD page code */
1668 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1670 return check_condition_result;
1672 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1673 ret = fill_from_dev_buffer(scp, arr,
1674 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1678 /* drops through here for a standard inquiry */
1679 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1680 arr[2] = sdebug_scsi_level;
1681 arr[3] = 2; /* response_data_format==2 */
1682 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1683 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1684 if (sdebug_vpd_use_hostno == 0)
1685 arr[5] |= 0x10; /* claim: implicit TPGS */
1686 arr[6] = 0x10; /* claim: MultiP */
1687 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1688 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1689 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1690 memcpy(&arr[16], sdebug_inq_product_id, 16);
1691 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1692 /* Use Vendor Specific area to place driver date in ASCII hex */
1693 memcpy(&arr[36], sdebug_version_date, 8);
1694 /* version descriptors (2 bytes each) follow */
1695 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1696 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1698 if (is_disk) { /* SBC-4 no version claimed */
1699 put_unaligned_be16(0x600, arr + n);
1701 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1702 put_unaligned_be16(0x525, arr + n);
1704 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1705 put_unaligned_be16(0x624, arr + n);
1708 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1709 ret = fill_from_dev_buffer(scp, arr,
1710 min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
/* Informational exceptions control mode page (0x1c); shared with MODE SENSE */
1715 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
/*
 * REQUEST SENSE responder. If the informational-exceptions page has TEST
 * set with MRIE==6, a synthetic THRESHOLD EXCEEDED sense is returned;
 * otherwise the stashed sense is reported, converting between fixed and
 * descriptor formats as requested by the DESC bit in cmd[1].
 */
1718 static int resp_requests(struct scsi_cmnd *scp,
1719 struct sdebug_dev_info *devip)
1721 unsigned char *sbuff;
1722 unsigned char *cmd = scp->cmnd;
1723 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1727 memset(arr, 0, sizeof(arr));
1728 dsense = !!(cmd[1] & 1);
1729 sbuff = scp->sense_buffer;
1730 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1733 arr[1] = 0x0; /* NO_SENSE in sense_key */
1734 arr[2] = THRESHOLD_EXCEEDED;
1735 arr[3] = 0xff; /* TEST set and MRIE==6 */
1739 arr[2] = 0x0; /* NO_SENSE in sense_key */
1740 arr[7] = 0xa; /* 18 byte sense buffer */
1741 arr[12] = THRESHOLD_EXCEEDED;
1742 arr[13] = 0xff; /* TEST set and MRIE==6 */
1745 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1746 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1747 ; /* have sense and formats match */
1748 else if (arr[0] <= 0x70) {
1758 } else if (dsense) {
/* convert stored fixed-format sense to descriptor format */
1761 arr[1] = sbuff[2]; /* sense key */
1762 arr[2] = sbuff[12]; /* asc */
1763 arr[3] = sbuff[13]; /* ascq */
/* REQUEST SENSE consumes the sense: reset to NO SENSE */
1775 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1776 return fill_from_dev_buffer(scp, arr, len);
/*
 * START STOP UNIT responder. Tracks the stopped state in devip->stopped;
 * only a valid POWER CONDITION of 0 is accepted. When the state does not
 * change, or IMMED is set, the immediate-response mask is returned.
 */
1779 static int resp_start_stop(struct scsi_cmnd *scp,
1780 struct sdebug_dev_info *devip)
1782 unsigned char *cmd = scp->cmnd;
1783 int power_cond, stop;
1786 power_cond = (cmd[4] & 0xf0) >> 4;
1788 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1789 return check_condition_result;
1791 stop = !(cmd[4] & 1);
1792 changing = atomic_read(&devip->stopped) == !stop;
1793 atomic_xchg(&devip->stopped, stop);
1794 if (!changing || cmd[1] & 0x1) /* state unchanged or IMMED set */
1795 return SDEG_RES_IMMED_MASK;
1800 static sector_t get_sdebug_capacity(void)
1802 static const unsigned int gibibyte = 1073741824;
1804 if (sdebug_virtual_gb > 0)
1805 return (sector_t)sdebug_virtual_gb *
1806 (gibibyte / sdebug_sector_size);
1808 return sdebug_store_sectors;
1811 #define SDEBUG_READCAP_ARR_SZ 8
/*
 * READ CAPACITY (10) responder. Returns last LBA and block size; a
 * capacity at or above 4 billion blocks is reported as 0xffffffff,
 * telling the initiator to use READ CAPACITY (16) instead.
 */
1812 static int resp_readcap(struct scsi_cmnd *scp,
1813 struct sdebug_dev_info *devip)
1815 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1818 /* following just in case virtual_gb changed */
1819 sdebug_capacity = get_sdebug_capacity();
1820 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1821 if (sdebug_capacity < 0xffffffff) {
1822 capac = (unsigned int)sdebug_capacity - 1;
1823 put_unaligned_be32(capac, arr + 0);
1825 put_unaligned_be32(0xffffffff, arr + 0);
1826 put_unaligned_be16(sdebug_sector_size, arr + 6);
1827 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1830 #define SDEBUG_READCAP16_ARR_SZ 32
/*
 * READ CAPACITY (16) responder. Besides last LBA and block size it
 * reports physical-block exponent, lowest aligned LBA, LBP (LBPME/LBPRZ)
 * and protection-information (P_TYPE/PROT_EN) fields.
 */
1831 static int resp_readcap16(struct scsi_cmnd *scp,
1832 struct sdebug_dev_info *devip)
1834 unsigned char *cmd = scp->cmnd;
1835 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1838 alloc_len = get_unaligned_be32(cmd + 10);
1839 /* following just in case virtual_gb changed */
1840 sdebug_capacity = get_sdebug_capacity();
1841 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1842 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1843 put_unaligned_be32(sdebug_sector_size, arr + 8);
1844 arr[13] = sdebug_physblk_exp & 0xf;
1845 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1847 if (scsi_debug_lbp()) {
1848 arr[14] |= 0x80; /* LBPME */
1849 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1850 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1851 * in the wider field maps to 0 in this field.
1853 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1857 arr[15] = sdebug_lowest_aligned & 0xff;
1859 if (have_dif_prot) {
1860 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1861 arr[12] |= 1; /* PROT_EN */
1864 return fill_from_dev_buffer(scp, arr,
1865 min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1868 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/*
 * REPORT TARGET PORT GROUPS responder. Advertises two port groups, one
 * per relative port; group B is always reported unavailable (matching
 * the two-port story told by VPD page 0x88).
 */
1870 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1871 struct sdebug_dev_info *devip)
1873 unsigned char *cmd = scp->cmnd;
1875 int host_no = devip->sdbg_host->shost->host_no;
1876 int n, ret, alen, rlen;
1877 int port_group_a, port_group_b, port_a, port_b;
1879 alen = get_unaligned_be32(cmd + 6);
1880 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1882 return DID_REQUEUE << 16;
1884 * EVPD page 0x88 states we have two ports, one
1885 * real and a fake port with no device connected.
1886 * So we create two port groups with one port each
1887 * and set the group with port B to unavailable.
1889 port_a = 0x1; /* relative port A */
1890 port_b = 0x2; /* relative port B */
1891 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1892 (devip->channel & 0x7f);
1893 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1894 (devip->channel & 0x7f) + 0x80;
1897 * The asymmetric access state is cycled according to the host_id.
1900 if (sdebug_vpd_use_hostno == 0) {
1901 arr[n++] = host_no % 3; /* Asymm access state */
1902 arr[n++] = 0x0F; /* claim: all states are supported */
1904 arr[n++] = 0x0; /* Active/Optimized path */
1905 arr[n++] = 0x01; /* only support active/optimized paths */
1907 put_unaligned_be16(port_group_a, arr + n);
1909 arr[n++] = 0; /* Reserved */
1910 arr[n++] = 0; /* Status code */
1911 arr[n++] = 0; /* Vendor unique */
1912 arr[n++] = 0x1; /* One port per group */
1913 arr[n++] = 0; /* Reserved */
1914 arr[n++] = 0; /* Reserved */
1915 put_unaligned_be16(port_a, arr + n);
1917 arr[n++] = 3; /* Port unavailable */
1918 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1919 put_unaligned_be16(port_group_b, arr + n);
1921 arr[n++] = 0; /* Reserved */
1922 arr[n++] = 0; /* Status code */
1923 arr[n++] = 0; /* Vendor unique */
1924 arr[n++] = 0x1; /* One port per group */
1925 arr[n++] = 0; /* Reserved */
1926 arr[n++] = 0; /* Reserved */
1927 put_unaligned_be16(port_b, arr + n);
1931 put_unaligned_be32(rlen, arr + 0);
1934 * Return the smallest value of either
1935 * - The allocated length
1936 * - The constructed command length
1937 * - The maximum array size
1939 rlen = min_t(int, alen, n);
1940 ret = fill_from_dev_buffer(scp, arr,
1941 min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
/*
 * Respond to the REPORT SUPPORTED OPERATION CODES command (SPC-4).
 * reporting_opts 0 lists every supported opcode (+ attached service-action
 * variants); 1..3 describe a single requested opcode (optionally with a
 * service action).  RCTD (cmd[2] bit 7) requests command timeout
 * descriptors, which widens each entry (bump 20 vs 8) and appends a
 * 0x0a-length timeout descriptor.
 * NOTE(review): several physical lines of this function are missing from
 * this extraction (braces, 'continue'/'break' statements, the final
 * kfree/return) — do not treat the text below as compilable as-is.
 */
1946 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1947 struct sdebug_dev_info *devip)
1950 u8 reporting_opts, req_opcode, sdeb_i, supp;
1952 u32 alloc_len, a_len;
1953 int k, offset, len, errsts, count, bump, na;
1954 const struct opcode_info_t *oip;
1955 const struct opcode_info_t *r_oip;
1957 u8 *cmd = scp->cmnd;
1959 rctd = !!(cmd[2] & 0x80);
1960 reporting_opts = cmd[2] & 0x7;
1961 req_opcode = cmd[3];
1962 req_sa = get_unaligned_be16(cmd + 4);
1963 alloc_len = get_unaligned_be32(cmd + 6);
/* SPC-4 requires at least a 4-byte header; cap at 0xffff */
1964 if (alloc_len < 4 || alloc_len > 0xffff) {
1965 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1966 return check_condition_result;
/* clamp working length; over-allocate a little for the last entry */
1968 if (alloc_len > 8192)
1972 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1974 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1976 return check_condition_result;
1978 switch (reporting_opts) {
1979 case 0: /* all commands */
1980 /* count number of commands */
1981 for (count = 0, oip = opcode_info_arr;
1982 oip->num_attached != 0xff; ++oip) {
1983 if (F_INV_OP & oip->flags)
1985 count += (oip->num_attached + 1);
1987 bump = rctd ? 20 : 8;
1988 put_unaligned_be32(count * bump, arr);
1989 for (offset = 4, oip = opcode_info_arr;
1990 oip->num_attached != 0xff && offset < a_len; ++oip) {
1991 if (F_INV_OP & oip->flags)
1993 na = oip->num_attached;
1994 arr[offset] = oip->opcode;
1995 put_unaligned_be16(oip->sa, arr + offset + 2);
/* CTDP bit when timeout descriptors requested */
1997 arr[offset + 5] |= 0x2;
1998 if (FF_SA & oip->flags)
1999 arr[offset + 5] |= 0x1;
2000 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2002 put_unaligned_be16(0xa, arr + offset + 8);
/* walk the attached (service-action sibling) opcode entries */
2004 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2005 if (F_INV_OP & oip->flags)
2008 arr[offset] = oip->opcode;
2009 put_unaligned_be16(oip->sa, arr + offset + 2);
2011 arr[offset + 5] |= 0x2;
2012 if (FF_SA & oip->flags)
2013 arr[offset + 5] |= 0x1;
2014 put_unaligned_be16(oip->len_mask[0],
2017 put_unaligned_be16(0xa,
2024 case 1: /* one command: opcode only */
2025 case 2: /* one command: opcode plus service action */
2026 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2027 sdeb_i = opcode_ind_arr[req_opcode];
2028 oip = &opcode_info_arr[sdeb_i];
2029 if (F_INV_OP & oip->flags) {
2033 if (1 == reporting_opts) {
2034 if (FF_SA & oip->flags) {
/* opcode has service actions: option 1 is invalid for it */
2035 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2038 return check_condition_result;
2041 } else if (2 == reporting_opts &&
2042 0 == (FF_SA & oip->flags)) {
2043 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2044 kfree(arr); /* point at requested sa */
2045 return check_condition_result;
2047 if (0 == (FF_SA & oip->flags) &&
2048 req_opcode == oip->opcode)
2050 else if (0 == (FF_SA & oip->flags)) {
2051 na = oip->num_attached;
2052 for (k = 0, oip = oip->arrp; k < na;
2054 if (req_opcode == oip->opcode)
/* supp: 1 = not supported, 3 = supported per SPC-4 */
2057 supp = (k >= na) ? 1 : 3;
2058 } else if (req_sa != oip->sa) {
2059 na = oip->num_attached;
2060 for (k = 0, oip = oip->arrp; k < na;
2062 if (req_sa == oip->sa)
2065 supp = (k >= na) ? 1 : 3;
/* build the one-command usage-data descriptor (cdb mask) */
2069 u = oip->len_mask[0];
2070 put_unaligned_be16(u, arr + 2);
2071 arr[4] = oip->opcode;
2072 for (k = 1; k < u; ++k)
2073 arr[4 + k] = (k < 16) ?
2074 oip->len_mask[k] : 0xff;
2079 arr[1] = (rctd ? 0x80 : 0) | supp;
2081 put_unaligned_be16(0xa, arr + offset);
2086 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2088 return check_condition_result;
/* trim response to working length, then to ALLOCATION LENGTH */
2090 offset = (offset < a_len) ? offset : a_len;
2091 len = (offset < alloc_len) ? offset : alloc_len;
2092 errsts = fill_from_dev_buffer(scp, arr, len);
2097 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2098 struct sdebug_dev_info *devip)
2103 u8 *cmd = scp->cmnd;
2105 memset(arr, 0, sizeof(arr));
2106 repd = !!(cmd[2] & 0x80);
2107 alloc_len = get_unaligned_be32(cmd + 6);
2108 if (alloc_len < 4) {
2109 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2110 return check_condition_result;
2112 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2113 arr[1] = 0x1; /* ITNRS */
2120 len = (len < alloc_len) ? len : alloc_len;
2121 return fill_from_dev_buffer(scp, arr, len);
2124 /* <<Following mode page info copied from ST318451LW>> */
/*
 * Build the Read-Write Error Recovery mode page (0x1) at p for MODE SENSE.
 * pcontrol==1 (changeable values) zeroes everything after the 2-byte
 * header, claiming nothing is changeable.  Returns the page length (12).
 */
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
/*
 * Build the Disconnect-Reconnect mode page (0x2) at p for MODE SENSE.
 * pcontrol==1 reports no changeable fields.  Returns the page length (16).
 */
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2148 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2149 { /* Format device page for mode_sense */
2150 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2151 0, 0, 0, 0, 0, 0, 0, 0,
2152 0, 0, 0, 0, 0x40, 0, 0, 0};
2154 memcpy(p, format_pg, sizeof(format_pg));
2155 put_unaligned_be16(sdebug_sectors_per, p + 10);
2156 put_unaligned_be16(sdebug_sector_size, p + 12);
2157 if (sdebug_removable)
2158 p[20] |= 0x20; /* should agree with INQUIRY */
2160 memset(p + 2, 0, sizeof(format_pg) - 2);
2161 return sizeof(format_pg);
/* Caching mode page (0x8) current values; WCE defaults on (byte 2 bit 2).
 * MODE SELECT writes through this buffer, so it is file-scope mutable. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2168 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2169 { /* Caching page for mode_sense */
2170 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2171 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2172 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2173 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2175 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2176 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2177 memcpy(p, caching_pg, sizeof(caching_pg));
2179 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2180 else if (2 == pcontrol)
2181 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2182 return sizeof(caching_pg);
2185 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2188 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2189 { /* Control mode page for mode_sense */
2190 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2192 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2196 ctrl_m_pg[2] |= 0x4;
2198 ctrl_m_pg[2] &= ~0x4;
2201 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2203 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2205 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2206 else if (2 == pcontrol)
2207 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2208 return sizeof(ctrl_m_pg);
/* Informational Exceptions Control mode page (0x1c) current values; MODE
 * SELECT writes through this buffer (restored here — the declaration was
 * dropped from this extraction but is referenced below and by
 * resp_mode_select/resp_ie_l_pg). */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};

/*
 * Build the Informational Exceptions Control mode page (0x1c) at p for
 * MODE SENSE.  pcontrol==1 yields the changeable mask, pcontrol==2 the
 * defaults.  Returns the page length (12).
 */
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}
/*
 * Build the SAS SSP protocol-specific port mode page (0x19, short format)
 * at p for MODE SENSE.  pcontrol==1 reports no changeable fields.
 * Returns the page length (8).
 */
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2239 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2241 { /* SAS phy control and discover mode page for mode_sense */
2242 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2243 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2244 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2245 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2246 0x2, 0, 0, 0, 0, 0, 0, 0,
2247 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2248 0, 0, 0, 0, 0, 0, 0, 0,
2249 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2250 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2251 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2252 0x3, 0, 0, 0, 0, 0, 0, 0,
2253 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2254 0, 0, 0, 0, 0, 0, 0, 0,
2258 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2259 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2260 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2261 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2262 port_a = target_dev_id + 1;
2263 port_b = port_a + 1;
2264 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2265 put_unaligned_be32(port_a, p + 20);
2266 put_unaligned_be32(port_b, p + 48 + 20);
2268 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2269 return sizeof(sas_pcd_m_pg);
/*
 * Build the SAS SSP shared protocol-specific port mode subpage (0x19/0x2)
 * at p for MODE SENSE.  pcontrol==1 reports no changeable fields past the
 * 4-byte subpage header.  Returns the subpage length (16).
 */
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2284 #define SDEBUG_MAX_MSENSE_SZ 256
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10): assemble the mode parameter
 * header, an optional (8- or 16-byte) block descriptor, then the requested
 * mode page(s) via the resp_*_pg helpers above.  pcontrol==3 (saved
 * values) is rejected; page 0x3f returns all pages.
 * NOTE(review): this extraction is missing multiple physical lines
 * (braces, else-arms, the switch(pcode) opener, offset bookkeeping) —
 * the text below is not compilable as-is.
 */
2286 static int resp_mode_sense(struct scsi_cmnd *scp,
2287 struct sdebug_dev_info *devip)
2289 int pcontrol, pcode, subpcode, bd_len;
2290 unsigned char dev_spec;
2291 int alloc_len, offset, len, target_dev_id;
2292 int target = scp->device->id;
2294 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2295 unsigned char *cmd = scp->cmnd;
2296 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2298 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2299 pcontrol = (cmd[2] & 0xc0) >> 6;
2300 pcode = cmd[2] & 0x3f;
2302 msense_6 = (MODE_SENSE == cmd[0]);
/* LLBAA (16-byte block descriptor) only exists in the 10-byte CDB */
2303 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2304 is_disk = (sdebug_ptype == TYPE_DISK);
2305 is_zbc = (sdebug_ptype == TYPE_ZBC);
2306 if ((is_disk || is_zbc) && !dbd)
2307 bd_len = llbaa ? 16 : 8;
2310 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2311 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2312 if (0x3 == pcontrol) { /* Saving values not supported */
2313 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2314 return check_condition_result;
2316 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2317 (devip->target * 1000) - 3;
2318 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2319 if (is_disk || is_zbc) {
2320 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2332 arr[4] = 0x1; /* set LONGLBA bit */
2333 arr[7] = bd_len; /* assume 255 or less */
2337 if ((bd_len > 0) && (!sdebug_capacity))
2338 sdebug_capacity = get_sdebug_capacity();
/* 8-byte descriptor caps the LBA count at 0xffffffff */
2341 if (sdebug_capacity > 0xfffffffe)
2342 put_unaligned_be32(0xffffffff, ap + 0);
2344 put_unaligned_be32(sdebug_capacity, ap + 0);
2345 put_unaligned_be16(sdebug_sector_size, ap + 6);
2348 } else if (16 == bd_len) {
2349 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2350 put_unaligned_be32(sdebug_sector_size, ap + 12);
/* subpages only defined for page 0x19 (and 0x3f handled below) */
2355 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2356 /* TODO: Control Extension page */
2357 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2358 return check_condition_result;
2363 case 0x1: /* Read-Write error recovery page, direct access */
2364 len = resp_err_recov_pg(ap, pcontrol, target);
2367 case 0x2: /* Disconnect-Reconnect page, all devices */
2368 len = resp_disconnect_pg(ap, pcontrol, target);
2371 case 0x3: /* Format device page, direct access */
2373 len = resp_format_pg(ap, pcontrol, target);
2378 case 0x8: /* Caching page, direct access */
2379 if (is_disk || is_zbc) {
2380 len = resp_caching_pg(ap, pcontrol, target);
2385 case 0xa: /* Control Mode page, all devices */
2386 len = resp_ctrl_m_pg(ap, pcontrol, target);
2389 case 0x19: /* if spc==1 then sas phy, control+discover */
2390 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2391 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2392 return check_condition_result;
2395 if ((0x0 == subpcode) || (0xff == subpcode))
2396 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2397 if ((0x1 == subpcode) || (0xff == subpcode))
2398 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2400 if ((0x2 == subpcode) || (0xff == subpcode))
2401 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2404 case 0x1c: /* Informational Exceptions Mode page, all devices */
2405 len = resp_iec_m_pg(ap, pcontrol, target);
2408 case 0x3f: /* Read all Mode pages */
2409 if ((0 == subpcode) || (0xff == subpcode)) {
2410 len = resp_err_recov_pg(ap, pcontrol, target);
2411 len += resp_disconnect_pg(ap + len, pcontrol, target);
2413 len += resp_format_pg(ap + len, pcontrol,
2415 len += resp_caching_pg(ap + len, pcontrol,
2417 } else if (is_zbc) {
2418 len += resp_caching_pg(ap + len, pcontrol,
2421 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2422 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2423 if (0xff == subpcode) {
2424 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2425 target, target_dev_id);
2426 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2428 len += resp_iec_m_pg(ap + len, pcontrol, target);
2431 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2432 return check_condition_result;
2440 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2441 return check_condition_result;
/* MODE DATA LENGTH: 1-byte for msense_6, 2-byte big-endian otherwise */
2444 arr[0] = offset - 1;
2446 put_unaligned_be16((offset - 2), arr + 0);
2447 return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
2450 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10): fetch the parameter list
 * from the data-out buffer, validate the header and block-descriptor
 * lengths, locate the single mode page at 'off' and copy its payload into
 * the matching file-scope current-values buffer (caching_pg, ctrl_m_pg or
 * iec_m_pg), then raise a MODE PARAMETERS CHANGED unit attention.
 * NOTE(review): several physical lines are missing from this extraction
 * (pf/sp extraction, res<0 check, md_len check, the switch(mpage) opener,
 * the final return) — the text below is not compilable as-is.
 */
2452 static int resp_mode_select(struct scsi_cmnd *scp,
2453 struct sdebug_dev_info *devip)
2455 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2456 int param_len, res, mpage;
2457 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2458 unsigned char *cmd = scp->cmnd;
2459 int mselect6 = (MODE_SELECT == cmd[0]);
2461 memset(arr, 0, sizeof(arr));
2464 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
/* PF must be set; SP (save pages) is not supported */
2465 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2466 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2467 return check_condition_result;
2469 res = fetch_to_dev_buffer(scp, arr, param_len);
2471 return DID_ERROR << 16;
2472 else if (sdebug_verbose && (res < param_len))
2473 sdev_printk(KERN_INFO, scp->device,
2474 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2475 __func__, param_len, res);
2476 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2477 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2479 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2480 return check_condition_result;
/* off: start of the first (only) mode page after header + descriptors */
2482 off = bd_len + (mselect6 ? 4 : 8);
2483 mpage = arr[off] & 0x3f;
2484 ps = !!(arr[off] & 0x80);
/* PS bit is reserved in MODE SELECT parameter data */
2486 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2487 return check_condition_result;
2489 spf = !!(arr[off] & 0x40);
2490 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2492 if ((pg_len + off) > param_len) {
2493 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2494 PARAMETER_LIST_LENGTH_ERR, 0);
2495 return check_condition_result;
2498 case 0x8: /* Caching Mode page */
2499 if (caching_pg[1] == arr[off + 1]) {
2500 memcpy(caching_pg + 2, arr + off + 2,
2501 sizeof(caching_pg) - 2);
2502 goto set_mode_changed_ua;
2505 case 0xa: /* Control Mode page */
2506 if (ctrl_m_pg[1] == arr[off + 1]) {
2507 memcpy(ctrl_m_pg + 2, arr + off + 2,
2508 sizeof(ctrl_m_pg) - 2);
2509 if (ctrl_m_pg[4] & 0x8)
/* mirror the selected D_SENSE bit into the module state */
2513 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2514 goto set_mode_changed_ua;
2517 case 0x1c: /* Informational Exceptions Mode page */
2518 if (iec_m_pg[1] == arr[off + 1]) {
2519 memcpy(iec_m_pg + 2, arr + off + 2,
2520 sizeof(iec_m_pg) - 2);
2521 goto set_mode_changed_ua;
2527 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2528 return check_condition_result;
2529 set_mode_changed_ua:
2530 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
/*
 * Build the Temperature log page (0xd) parameter data at arr: current
 * temperature 38 C (param 0x0) and reference temperature 65 C (param 0x1).
 * Returns the number of bytes written (12).
 */
static int resp_temp_l_pg(unsigned char *arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2544 static int resp_ie_l_pg(unsigned char *arr)
2546 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2549 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2550 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2551 arr[4] = THRESHOLD_EXCEEDED;
2554 return sizeof(ie_l_pg);
2557 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * Respond to the LOG SENSE command.  Supports the supported-pages list
 * (0x0), Temperature (0xd) and Informational Exceptions (0x2f) pages, in
 * both the no-subpage and the 0xff (all subpages) forms.
 * NOTE(review): physical lines are missing from this extraction (ppc/sp
 * extraction, the switch(pcode) openers, several n bookkeeping lines) —
 * the text below is not compilable as-is.
 */
2559 static int resp_log_sense(struct scsi_cmnd *scp,
2560 struct sdebug_dev_info *devip)
2562 int ppc, sp, pcode, subpcode, alloc_len, len, n;
2563 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2564 unsigned char *cmd = scp->cmnd;
2566 memset(arr, 0, sizeof(arr));
/* PPC and SP are both unsupported */
2570 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2571 return check_condition_result;
2573 pcode = cmd[2] & 0x3f;
2574 subpcode = cmd[3] & 0xff;
2575 alloc_len = get_unaligned_be16(cmd + 7);
2577 if (0 == subpcode) {
2579 case 0x0: /* Supported log pages log page */
2581 arr[n++] = 0x0; /* this page */
2582 arr[n++] = 0xd; /* Temperature */
2583 arr[n++] = 0x2f; /* Informational exceptions */
2586 case 0xd: /* Temperature log page */
2587 arr[3] = resp_temp_l_pg(arr + 4);
2589 case 0x2f: /* Informational exceptions log page */
2590 arr[3] = resp_ie_l_pg(arr + 4);
2593 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2594 return check_condition_result;
2596 } else if (0xff == subpcode) {
2600 case 0x0: /* Supported log pages and subpages log page */
2603 arr[n++] = 0x0; /* 0,0 page */
2605 arr[n++] = 0xff; /* this page */
2607 arr[n++] = 0x0; /* Temperature */
2609 arr[n++] = 0x0; /* Informational exceptions */
2612 case 0xd: /* Temperature subpages */
2615 arr[n++] = 0x0; /* Temperature */
2618 case 0x2f: /* Informational exceptions subpages */
2621 arr[n++] = 0x0; /* Informational exceptions */
2625 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2626 return check_condition_result;
2629 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2630 return check_condition_result;
/* page length field at arr[2..3] plus 4-byte header, capped by CDB */
2632 len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2633 return fill_from_dev_buffer(scp, arr,
2634 min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2637 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2639 return devip->nr_zones != 0;
2642 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2643 unsigned long long lba)
2647 if (devip->zsize_shift)
2648 zno = lba >> devip->zsize_shift;
2650 zno = lba / devip->zsize;
2651 return &devip->zstate[zno];
2654 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2656 return zsp->z_cond == ZBC_NOT_WRITE_POINTER;
2659 static void zbc_close_zone(struct sdebug_dev_info *devip,
2660 struct sdeb_zone_state *zsp)
2662 enum sdebug_z_cond zc;
2664 if (zbc_zone_is_conv(zsp))
2668 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2671 if (zc == ZC2_IMPLICIT_OPEN)
2672 devip->nr_imp_open--;
2674 devip->nr_exp_open--;
2676 if (zsp->z_wp == zsp->z_start) {
2677 zsp->z_cond = ZC1_EMPTY;
2679 zsp->z_cond = ZC4_CLOSED;
2684 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2686 struct sdeb_zone_state *zsp = &devip->zstate[0];
2689 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2690 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2691 zbc_close_zone(devip, zsp);
2697 static void zbc_open_zone(struct sdebug_dev_info *devip,
2698 struct sdeb_zone_state *zsp, bool explicit)
2700 enum sdebug_z_cond zc;
2702 if (zbc_zone_is_conv(zsp))
2706 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2707 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2710 /* Close an implicit open zone if necessary */
2711 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2712 zbc_close_zone(devip, zsp);
2713 else if (devip->max_open &&
2714 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2715 zbc_close_imp_open_zone(devip);
2717 if (zsp->z_cond == ZC4_CLOSED)
2720 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2721 devip->nr_exp_open++;
2723 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2724 devip->nr_imp_open++;
2728 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2729 unsigned long long lba, unsigned int num)
2731 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2733 if (zbc_zone_is_conv(zsp))
2737 if (zsp->z_wp >= zsp->z_start + zsp->z_size)
2738 zsp->z_cond = ZC5_FULL;
2741 static int check_zbc_access_params(struct scsi_cmnd *scp,
2742 unsigned long long lba, unsigned int num, bool write)
2744 struct scsi_device *sdp = scp->device;
2745 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2746 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2747 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2750 /* Reads cannot cross zone types boundaries */
2751 if (zsp_end != zsp &&
2752 zbc_zone_is_conv(zsp) &&
2753 !zbc_zone_is_conv(zsp_end)) {
2754 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2757 return check_condition_result;
2762 /* No restrictions for writes within conventional zones */
2763 if (zbc_zone_is_conv(zsp)) {
2764 if (!zbc_zone_is_conv(zsp_end)) {
2765 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2767 WRITE_BOUNDARY_ASCQ);
2768 return check_condition_result;
2773 /* Writes cannot cross sequential zone boundaries */
2774 if (zsp_end != zsp) {
2775 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2777 WRITE_BOUNDARY_ASCQ);
2778 return check_condition_result;
2780 /* Cannot write full zones */
2781 if (zsp->z_cond == ZC5_FULL) {
2782 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2783 INVALID_FIELD_IN_CDB, 0);
2784 return check_condition_result;
2786 /* Writes must be aligned to the zone WP */
2787 if (lba != zsp->z_wp) {
2788 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2790 UNALIGNED_WRITE_ASCQ);
2791 return check_condition_result;
2794 /* Handle implicit open of closed and empty zones */
2795 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2796 if (devip->max_open &&
2797 devip->nr_exp_open >= devip->max_open) {
2798 mk_sense_buffer(scp, DATA_PROTECT,
2801 return check_condition_result;
2803 zbc_open_zone(devip, zsp, false);
2809 static inline int check_device_access_params
2810 (struct scsi_cmnd *scp, unsigned long long lba,
2811 unsigned int num, bool write)
2813 struct scsi_device *sdp = scp->device;
2814 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2816 if (lba + num > sdebug_capacity) {
2817 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2818 return check_condition_result;
2820 /* transfer length excessive (tie in to block limits VPD page) */
2821 if (num > sdebug_store_sectors) {
2822 /* needs work to find which cdb byte 'num' comes from */
2823 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2824 return check_condition_result;
2826 if (write && unlikely(sdebug_wp)) {
2827 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2828 return check_condition_result;
2830 if (sdebug_dev_is_zoned(devip))
2831 return check_zbc_access_params(scp, lba, num, write);
2836 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip)
2838 return sdebug_fake_rw ?
2839 NULL : xa_load(per_store_ap, devip->sdbg_host->si_idx);
2842 /* Returns number of bytes copied or -1 if error. */
2843 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2844 u32 sg_skip, u64 lba, u32 num, bool do_write)
2847 u64 block, rest = 0;
2848 enum dma_data_direction dir;
2849 struct scsi_data_buffer *sdb = &scp->sdb;
2853 dir = DMA_TO_DEVICE;
2854 write_since_sync = true;
2856 dir = DMA_FROM_DEVICE;
2859 if (!sdb->length || !sip)
2861 if (scp->sc_data_direction != dir)
2865 block = do_div(lba, sdebug_store_sectors);
2866 if (block + num > sdebug_store_sectors)
2867 rest = block + num - sdebug_store_sectors;
2869 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2870 fsp + (block * sdebug_sector_size),
2871 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2872 if (ret != (num - rest) * sdebug_sector_size)
2876 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2877 fsp, rest * sdebug_sector_size,
2878 sg_skip + ((num - rest) * sdebug_sector_size),
2885 /* Returns number of bytes copied or -1 if error. */
2886 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2888 struct scsi_data_buffer *sdb = &scp->sdb;
2892 if (scp->sc_data_direction != DMA_TO_DEVICE)
2894 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2895 num * sdebug_sector_size, 0, true);
2898 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2899 * arr into sip->storep+lba and return true. If comparison fails then
2901 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2902 const u8 *arr, bool compare_only)
2905 u64 block, rest = 0;
2906 u32 store_blks = sdebug_store_sectors;
2907 u32 lb_size = sdebug_sector_size;
2908 u8 *fsp = sip->storep;
2910 block = do_div(lba, store_blks);
2911 if (block + num > store_blks)
2912 rest = block + num - store_blks;
2914 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2918 res = memcmp(fsp, arr + ((num - rest) * lb_size),
2924 arr += num * lb_size;
2925 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2927 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2931 static __be16 dif_compute_csum(const void *buf, int len)
2936 csum = (__force __be16)ip_compute_csum(buf, len);
2938 csum = cpu_to_be16(crc_t10dif(buf, len));
2943 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2944 sector_t sector, u32 ei_lba)
2946 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2948 if (sdt->guard_tag != csum) {
2949 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2950 (unsigned long)sector,
2951 be16_to_cpu(sdt->guard_tag),
2955 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2956 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2957 pr_err("REF check failed on sector %lu\n",
2958 (unsigned long)sector);
2961 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2962 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2963 pr_err("REF check failed on sector %lu\n",
2964 (unsigned long)sector);
2970 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
2971 unsigned int sectors, bool read)
2975 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
2976 scp->device->hostdata);
2977 struct t10_pi_tuple *dif_storep = sip->dif_storep;
2978 const void *dif_store_end = dif_storep + sdebug_store_sectors;
2979 struct sg_mapping_iter miter;
2981 /* Bytes of protection data to copy into sgl */
2982 resid = sectors * sizeof(*dif_storep);
2984 sg_miter_start(&miter, scsi_prot_sglist(scp),
2985 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
2986 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2988 while (sg_miter_next(&miter) && resid > 0) {
2989 size_t len = min_t(size_t, miter.length, resid);
2990 void *start = dif_store(sip, sector);
2993 if (dif_store_end < start + len)
2994 rest = start + len - dif_store_end;
2999 memcpy(paddr, start, len - rest);
3001 memcpy(start, paddr, len - rest);
3005 memcpy(paddr + len - rest, dif_storep, rest);
3007 memcpy(dif_storep, paddr + len - rest, rest);
3010 sector += len / sizeof(*dif_storep);
3013 sg_miter_stop(&miter);
3016 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3017 unsigned int sectors, u32 ei_lba)
3021 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3022 scp->device->hostdata);
3023 struct t10_pi_tuple *sdt;
3025 for (i = 0; i < sectors; i++, ei_lba++) {
3028 sector = start_sec + i;
3029 sdt = dif_store(sip, sector);
3031 if (sdt->app_tag == cpu_to_be16(0xffff))
3034 ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3042 dif_copy_prot(scp, start_sec, sectors, true);
/*
 * Respond to the READ family of commands (READ 6/10/12/16/32,
 * XDWRITEREAD(10)): decode lba/num per CDB form, validate protection and
 * access parameters, optionally inject errors, then copy data from the
 * store under the per-store read lock.
 * NOTE(review): this extraction is missing physical lines (the
 * switch(cmd[0]) opener, several braces/else arms, num reduction for
 * inj_short, the final return) — the text below is not compilable as-is.
 */
3048 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3055 struct sdeb_store_info *sip = devip2sip(devip);
3056 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3057 u8 *cmd = scp->cmnd;
3058 struct sdebug_queued_cmd *sqcp;
3063 lba = get_unaligned_be64(cmd + 2);
3064 num = get_unaligned_be32(cmd + 10);
3069 lba = get_unaligned_be32(cmd + 2);
3070 num = get_unaligned_be16(cmd + 7);
/* READ(6): 21-bit LBA, count 0 means 256 blocks */
3075 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3076 (u32)(cmd[1] & 0x1f) << 16;
3077 num = (0 == cmd[4]) ? 256 : cmd[4];
3082 lba = get_unaligned_be32(cmd + 2);
3083 num = get_unaligned_be32(cmd + 6);
3086 case XDWRITEREAD_10:
3088 lba = get_unaligned_be32(cmd + 2);
3089 num = get_unaligned_be16(cmd + 7);
3092 default: /* assume READ(32) */
3093 lba = get_unaligned_be64(cmd + 12);
3094 ei_lba = get_unaligned_be32(cmd + 20);
3095 num = get_unaligned_be32(cmd + 28);
3099 if (unlikely(have_dif_prot && check_prot)) {
/* Type 2 forbids RDPROTECT!=0 on 10/12/16 byte CDBs */
3100 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3102 mk_sense_invalid_opcode(scp);
3103 return check_condition_result;
3105 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3106 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3107 (cmd[1] & 0xe0) == 0)
3108 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3111 if (unlikely(sdebug_any_injecting_opt)) {
3112 sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
3115 if (sqcp->inj_short)
3121 ret = check_device_access_params(scp, lba, num, false);
3124 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3125 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3126 ((lba + num) > sdebug_medium_error_start))) {
3127 /* claim unrecoverable read error */
3128 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3129 /* set info field and valid bit for fixed descriptor */
3130 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3131 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3132 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3133 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3134 put_unaligned_be32(ret, scp->sense_buffer + 3);
3136 scsi_set_resid(scp, scsi_bufflen(scp));
3137 return check_condition_result;
3140 read_lock(macc_lckp);
/* DIX + T10 DIF: verify stored protection info before the data copy */
3143 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3144 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3147 read_unlock(macc_lckp);
3148 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3149 return illegal_condition_result;
3153 ret = do_device_access(sip, scp, 0, lba, num, false);
3154 read_unlock(macc_lckp);
3155 if (unlikely(ret == -1))
3156 return DID_ERROR << 16;
3158 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
/* post-transfer injected error variants */
3160 if (unlikely(sqcp)) {
3161 if (sqcp->inj_recovered) {
3162 mk_sense_buffer(scp, RECOVERED_ERROR,
3163 THRESHOLD_EXCEEDED, 0);
3164 return check_condition_result;
3165 } else if (sqcp->inj_transport) {
3166 mk_sense_buffer(scp, ABORTED_COMMAND,
3167 TRANSPORT_PROBLEM, ACK_NAK_TO);
3168 return check_condition_result;
3169 } else if (sqcp->inj_dif) {
3170 /* Logical block guard check failed */
3171 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3172 return illegal_condition_result;
3173 } else if (sqcp->inj_dix) {
3174 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3175 return illegal_condition_result;
/*
 * Log a hex/ASCII dump of a sector to the kernel log, 16 bytes per line,
 * used when a protection-information verify fails on a write.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i+j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i+j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i+j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
/* Verify the T10 PI (protection information) tuples that accompany a
 * WRITE with DIX enabled.  Walks the protection scatterlist and the data
 * scatterlist in lock-step with sg mapping iterators, calling dif_verify()
 * for each sector; on success copies the PI into the fake store via
 * dif_copy_prot().  Returns 0 on success, non-zero dif_verify() result on
 * mismatch (caller turns that into a sense code). */
3203 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3204 unsigned int sectors, u32 ei_lba)
3207 struct t10_pi_tuple *sdt;
3209 sector_t sector = start_sec;
3212 struct sg_mapping_iter diter;
3213 struct sg_mapping_iter piter;
/* both data and protection scatterlists must be present for DIX */
3215 BUG_ON(scsi_sg_count(SCpnt) == 0);
3216 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
/* SG_MITER_ATOMIC: pages are kmap_atomic'd, so no sleeping below */
3218 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3219 scsi_prot_sg_count(SCpnt),
3220 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3221 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3222 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3224 /* For each protection page */
3225 while (sg_miter_next(&piter)) {
/* data sg list must not run out before the protection sg list */
3227 if (WARN_ON(!sg_miter_next(&diter))) {
/* one 8-byte t10_pi_tuple per data sector within this page */
3232 for (ppage_offset = 0; ppage_offset < piter.length;
3233 ppage_offset += sizeof(struct t10_pi_tuple)) {
3234 /* If we're at the end of the current
3235 * data page advance to the next one
3237 if (dpage_offset >= diter.length) {
3238 if (WARN_ON(!sg_miter_next(&diter))) {
3245 sdt = piter.addr + ppage_offset;
3246 daddr = diter.addr + dpage_offset;
3248 ret = dif_verify(sdt, daddr, sector, ei_lba);
/* on mismatch, log the whole sector for diagnosis */
3250 dump_sector(daddr, sdebug_sector_size);
3256 dpage_offset += sdebug_sector_size;
3258 diter.consumed = dpage_offset;
3259 sg_miter_stop(&diter);
3261 sg_miter_stop(&piter);
/* all tuples verified: persist PI into the store (false => to store) */
3263 dif_copy_prot(SCpnt, start_sec, sectors, false);
/* error path: stop both iterators before returning the failure */
3270 sg_miter_stop(&diter);
3271 sg_miter_stop(&piter);
/* Convert an LBA to its index in the provisioning bitmap (map_storep).
 * Compensates for the unmap alignment offset before dividing by the
 * unmap granularity. */
3275 static unsigned long lba_to_map_index(sector_t lba)
3277 if (sdebug_unmap_alignment)
3278 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
/* sector_div modifies lba in place and is the 64-bit-safe division */
3279 sector_div(lba, sdebug_unmap_granularity);
/* Inverse of lba_to_map_index(): first LBA covered by a provisioning
 * bitmap index, undoing the alignment compensation. */
3283 static sector_t map_index_to_lba(unsigned long index)
3285 sector_t lba = index * sdebug_unmap_granularity;
3287 if (sdebug_unmap_alignment)
3288 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
/* Report the provisioning state (mapped / unmapped) of the block at
 * @lba and, through the out-parameter, how many following blocks share
 * that state (bounded by sdebug_store_sectors).  Used by GET LBA STATUS. */
3292 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3296 unsigned int mapped;
3297 unsigned long index;
3300 index = lba_to_map_index(lba);
3301 mapped = test_bit(index, sip->map_storep);
/* scan forward for the first bit with the opposite state to size the run */
3304 next = find_next_zero_bit(sip->map_storep, map_size, index);
3306 next = find_next_bit(sip->map_storep, map_size, index);
3308 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/* Mark the LBA range [lba, lba+len) as mapped (provisioned) in the
 * bitmap, one granularity-sized chunk per bit. */
3313 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3316 sector_t end = lba + len;
3319 unsigned long index = lba_to_map_index(lba);
3321 if (index < map_size)
3322 set_bit(index, sip->map_storep);
/* advance to the first LBA of the next bitmap chunk */
3324 lba = map_index_to_lba(index + 1);
/* Clear provisioning bits for [lba, lba+len), but only for chunks that
 * are entirely contained in the range (partial chunks stay mapped).
 * Depending on sdebug_lbprz, deallocated data reads back as zeroes
 * (LBPRZ=1) or 0xff bytes (LBPRZ=2); PI for the chunk is invalidated
 * to 0xff as well. */
3328 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3331 sector_t end = lba + len;
3332 u8 *fsp = sip->storep;
3335 unsigned long index = lba_to_map_index(lba);
/* only unmap when the chunk is aligned and wholly inside the range */
3337 if (lba == map_index_to_lba(index) &&
3338 lba + sdebug_unmap_granularity <= end &&
3340 clear_bit(index, sip->map_storep);
3341 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3342 memset(fsp + lba * sdebug_sector_size,
3343 (sdebug_lbprz & 1) ? 0 : 0xff,
3344 sdebug_sector_size *
3345 sdebug_unmap_granularity);
/* invalidate stored protection info for the deallocated chunk */
3347 if (sip->dif_storep) {
3348 memset(sip->dif_storep + lba, 0xff,
3349 sizeof(*sip->dif_storep) *
3350 sdebug_unmap_granularity);
3353 lba = map_index_to_lba(index + 1);
/* Handle the WRITE family of commands (WRITE 6/10/12/16/32 and
 * XDWRITEREAD(10)): decode LBA/count from the CDB variant, optionally
 * verify DIX protection data, copy the data-out buffer into the fake
 * store under the store's write lock, update LBP map and ZBC write
 * pointer, then apply any configured error injections.
 * Returns 0 on success or a check/illegal condition result. */
3357 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3364 struct sdeb_store_info *sip = devip2sip(devip);
/* fall back to a dummy lock when there is no backing store (fake_rw) */
3365 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3366 u8 *cmd = scp->cmnd;
/* WRITE(16): 8-byte LBA at offset 2, 4-byte count at offset 10 */
3371 lba = get_unaligned_be64(cmd + 2);
3372 num = get_unaligned_be32(cmd + 10);
/* WRITE(10) */
3377 lba = get_unaligned_be32(cmd + 2);
3378 num = get_unaligned_be16(cmd + 7);
/* WRITE(6): 21-bit LBA packed into bytes 1..3; count 0 means 256 */
3383 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3384 (u32)(cmd[1] & 0x1f) << 16;
3385 num = (0 == cmd[4]) ? 256 : cmd[4];
/* WRITE(12) */
3390 lba = get_unaligned_be32(cmd + 2);
3391 num = get_unaligned_be32(cmd + 6);
3394 case 0x53: /* XDWRITEREAD(10) */
3396 lba = get_unaligned_be32(cmd + 2);
3397 num = get_unaligned_be16(cmd + 7);
3400 default: /* assume WRITE(32) */
3401 lba = get_unaligned_be64(cmd + 12);
3402 ei_lba = get_unaligned_be32(cmd + 20);
3403 num = get_unaligned_be32(cmd + 28);
/* with type 2 protection, plain WRITE opcodes are not permitted */
3407 if (unlikely(have_dif_prot && check_prot)) {
3408 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3410 mk_sense_invalid_opcode(scp);
3411 return check_condition_result;
/* WRPROTECT == 0 on a formatted-with-PI device: warn but proceed */
3413 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3414 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3415 (cmd[1] & 0xe0) == 0)
3416 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3420 write_lock(macc_lckp);
3421 ret = check_device_access_params(scp, lba, num, true);
3423 write_unlock(macc_lckp);
/* DIX: verify the accompanying protection data before writing */
3428 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3429 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3432 write_unlock(macc_lckp);
3433 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3434 return illegal_condition_result;
3438 ret = do_device_access(sip, scp, 0, lba, num, true);
3439 if (unlikely(scsi_debug_lbp()))
3440 map_region(sip, lba, num);
3441 /* If ZBC zone then bump its write pointer */
3442 if (sdebug_dev_is_zoned(devip))
3443 zbc_inc_wp(devip, lba, num);
3444 write_unlock(macc_lckp);
3445 if (unlikely(-1 == ret))
3446 return DID_ERROR << 16;
/* short transfer is not an error, just noisy in verbose mode */
3447 else if (unlikely(sdebug_verbose &&
3448 (ret < (num * sdebug_sector_size))))
3449 sdev_printk(KERN_INFO, scp->device,
3450 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3451 my_name, num * sdebug_sector_size, ret);
/* optional error injection, driven by per-command flags in host_scribble */
3453 if (unlikely(sdebug_any_injecting_opt)) {
3454 struct sdebug_queued_cmd *sqcp =
3455 (struct sdebug_queued_cmd *)scp->host_scribble;
3458 if (sqcp->inj_recovered) {
3459 mk_sense_buffer(scp, RECOVERED_ERROR,
3460 THRESHOLD_EXCEEDED, 0);
3461 return check_condition_result;
3462 } else if (sqcp->inj_dif) {
3463 /* Logical block guard check failed */
3464 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3465 return illegal_condition_result;
3466 } else if (sqcp->inj_dix) {
3467 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3468 return illegal_condition_result;
3476 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3477 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
/* Handle WRITE SCATTERED(16/32): fetch the whole data-out buffer
 * (header + LBA range descriptors + data) into a kernel buffer, then
 * for each descriptor validate the range and write its data slice to
 * the store under the write lock.  Returns 0 or an error/condition
 * result; always frees the temporary buffer. */
3479 static int resp_write_scat(struct scsi_cmnd *scp,
3480 struct sdebug_dev_info *devip)
3482 u8 *cmd = scp->cmnd;
3485 struct sdeb_store_info *sip = devip2sip(devip);
3486 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3488 u16 lbdof, num_lrd, k;
3489 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3490 u32 lb_size = sdebug_sector_size;
3495 static const u32 lrd_size = 32; /* + parameter list header size */
/* WRITE SCATTERED(32) uses the variable-length CDB layout */
3497 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3499 wrprotect = (cmd[10] >> 5) & 0x7;
3500 lbdof = get_unaligned_be16(cmd + 12);
3501 num_lrd = get_unaligned_be16(cmd + 16);
3502 bt_len = get_unaligned_be32(cmd + 28);
3503 } else { /* that leaves WRITE SCATTERED(16) */
3505 wrprotect = (cmd[2] >> 5) & 0x7;
3506 lbdof = get_unaligned_be16(cmd + 4);
3507 num_lrd = get_unaligned_be16(cmd + 8);
3508 bt_len = get_unaligned_be32(cmd + 10);
3509 if (unlikely(have_dif_prot)) {
3510 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
/* NOTE(review): returns illegal_condition_result here, while
 * resp_write_dt0 returns check_condition_result for the same
 * invalid-opcode condition — confirm which is intended. */
3512 mk_sense_invalid_opcode(scp);
3513 return illegal_condition_result;
3515 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3516 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3518 sdev_printk(KERN_ERR, scp->device,
3519 "Unprotected WR to DIF device\n");
3522 if ((num_lrd == 0) || (bt_len == 0))
3523 return 0; /* T10 says these do-nothings are not errors */
3526 sdev_printk(KERN_INFO, scp->device,
3527 "%s: %s: LB Data Offset field bad\n",
3529 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3530 return illegal_condition_result;
/* LB data offset is expressed in logical blocks */
3532 lbdof_blen = lbdof * lb_size;
3533 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3535 sdev_printk(KERN_INFO, scp->device,
3536 "%s: %s: LBA range descriptors don't fit\n",
3538 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3539 return illegal_condition_result;
/* GFP_ATOMIC: may run from non-sleepable queuecommand context */
3541 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3543 return SCSI_MLQUEUE_HOST_BUSY;
3545 sdev_printk(KERN_INFO, scp->device,
3546 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3547 my_name, __func__, lbdof_blen);
3548 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3550 ret = DID_ERROR << 16;
3554 write_lock(macc_lckp);
/* data for the first descriptor starts right after the descriptors */
3555 sg_off = lbdof_blen;
3556 /* Spec says Buffer xfer Length field in number of LBs in dout */
3558 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3559 lba = get_unaligned_be64(up + 0);
3560 num = get_unaligned_be32(up + 8);
3562 sdev_printk(KERN_INFO, scp->device,
3563 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3564 my_name, __func__, k, lba, num, sg_off);
3567 ret = check_device_access_params(scp, lba, num, true);
3569 goto err_out_unlock;
3570 num_by = num * lb_size;
/* expected initial LBA only carried by the 32-byte variant */
3571 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
/* running block total must not exceed the buffer transfer length */
3573 if ((cum_lb + num) > bt_len) {
3575 sdev_printk(KERN_INFO, scp->device,
3576 "%s: %s: sum of blocks > data provided\n",
3578 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3580 ret = illegal_condition_result;
3581 goto err_out_unlock;
3585 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3586 int prot_ret = prot_verify_write(scp, lba, num,
3590 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3592 ret = illegal_condition_result;
3593 goto err_out_unlock;
3597 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3598 /* If ZBC zone then bump its write pointer */
3599 if (sdebug_dev_is_zoned(devip))
3600 zbc_inc_wp(devip, lba, num);
3601 if (unlikely(scsi_debug_lbp()))
3602 map_region(sip, lba, num);
3603 if (unlikely(-1 == ret)) {
3604 ret = DID_ERROR << 16;
3605 goto err_out_unlock;
3606 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3607 sdev_printk(KERN_INFO, scp->device,
3608 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3609 my_name, num_by, ret);
/* per-descriptor error injection (same mechanism as resp_write_dt0) */
3611 if (unlikely(sdebug_any_injecting_opt)) {
3612 struct sdebug_queued_cmd *sqcp =
3613 (struct sdebug_queued_cmd *)scp->host_scribble;
3616 if (sqcp->inj_recovered) {
3617 mk_sense_buffer(scp, RECOVERED_ERROR,
3618 THRESHOLD_EXCEEDED, 0);
3619 ret = illegal_condition_result;
3620 goto err_out_unlock;
3621 } else if (sqcp->inj_dif) {
3622 /* Logical block guard check failed */
3623 mk_sense_buffer(scp, ABORTED_COMMAND,
3625 ret = illegal_condition_result;
3626 goto err_out_unlock;
3627 } else if (sqcp->inj_dix) {
3628 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3630 ret = illegal_condition_result;
3631 goto err_out_unlock;
3640 write_unlock(macc_lckp);
/* Core of WRITE SAME(10/16): either unmap the range (UNMAP bit with LBP
 * enabled), or materialize one logical block (zeroes when NDOB is set,
 * otherwise fetched from the data-out buffer) and replicate it across
 * the remaining blocks of the range.  Runs under the store write lock. */
3646 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3647 u32 ei_lba, bool unmap, bool ndob)
3649 struct scsi_device *sdp = scp->device;
3650 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3651 unsigned long long i;
3653 u32 lb_size = sdebug_sector_size;
3655 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3656 scp->device->hostdata);
/* fallback lock is used when sip is NULL (no backing store) */
3657 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3661 write_lock(macc_lckp);
3663 ret = check_device_access_params(scp, lba, num, true);
3665 write_unlock(macc_lckp);
3669 if (unmap && scsi_debug_lbp()) {
3670 unmap_region(sip, lba, num);
/* map LBA into the (possibly smaller, wrapping) backing store */
3674 block = do_div(lbaa, sdebug_store_sectors);
3675 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3677 fs1p = fsp + (block * lb_size);
3679 memset(fs1p, 0, lb_size);
3682 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
/* NOTE(review): unlocks &sip->macc_lck directly instead of macc_lckp;
 * if sip were NULL the lock taken at 3661 was the fake-rw fallback and
 * this would both dereference NULL and unbalance the locks — confirm
 * and consider using write_unlock(macc_lckp) here. */
3685 write_unlock(&sip->macc_lck);
3686 return DID_ERROR << 16;
3687 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3688 sdev_printk(KERN_INFO, scp->device,
3689 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3690 my_name, "write same", lb_size, ret);
3692 /* Copy first sector to remaining blocks */
3693 for (i = 1 ; i < num ; i++) {
3695 block = do_div(lbaa, sdebug_store_sectors);
/* memmove: source block may overlap destination when store wraps */
3696 memmove(fsp + (block * lb_size), fs1p, lb_size);
3698 if (scsi_debug_lbp())
3699 map_region(sip, lba, num);
3700 /* If ZBC zone then bump its write pointer */
3701 if (sdebug_dev_is_zoned(devip))
3702 zbc_inc_wp(devip, lba, num);
3704 write_unlock(macc_lckp);
/* Decode WRITE SAME(10): validate the UNMAP capability and the block
 * count against the module limit, then delegate to resp_write_same()
 * (NDOB is not available in the 10-byte CDB). */
3709 static int resp_write_same_10(struct scsi_cmnd *scp,
3710 struct sdebug_dev_info *devip)
3712 u8 *cmd = scp->cmnd;
/* UNMAP requested but WS10 unmap support not configured */
3719 if (sdebug_lbpws10 == 0) {
3720 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3721 return check_condition_result;
3725 lba = get_unaligned_be32(cmd + 2);
3726 num = get_unaligned_be16(cmd + 7);
3727 if (num > sdebug_write_same_length) {
3728 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3729 return check_condition_result;
3731 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
/* Decode WRITE SAME(16): like the 10-byte variant but with 64-bit LBA,
 * 32-bit count and support for the NDOB (no data-out buffer => zero
 * fill) bit; delegates to resp_write_same(). */
3734 static int resp_write_same_16(struct scsi_cmnd *scp,
3735 struct sdebug_dev_info *devip)
3737 u8 *cmd = scp->cmnd;
3744 if (cmd[1] & 0x8) { /* UNMAP */
3745 if (sdebug_lbpws == 0) {
3746 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3747 return check_condition_result;
3751 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3753 lba = get_unaligned_be64(cmd + 2);
3754 num = get_unaligned_be32(cmd + 10);
3755 if (num > sdebug_write_same_length) {
3756 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3757 return check_condition_result;
3759 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3762 /* Note the mode field is in the same position as the (lower) service action
3763 * field. For the Report supported operation codes command, SPC-4 suggests
3764 * each mode of this command should be reported separately; for future. */
/* Handle WRITE BUFFER by simulating microcode download side effects:
 * depending on the mode, set unit-attention bits on this device only
 * or on all LUs sharing the target.  No data is actually stored. */
3765 static int resp_write_buffer(struct scsi_cmnd *scp,
3766 struct sdebug_dev_info *devip)
3768 u8 *cmd = scp->cmnd;
3769 struct scsi_device *sdp = scp->device;
3770 struct sdebug_dev_info *dp;
3773 mode = cmd[1] & 0x1f;
3775 case 0x4: /* download microcode (MC) and activate (ACT) */
3776 /* set UAs on this device only */
3777 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3778 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3780 case 0x5: /* download MC, save and ACT */
3781 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3783 case 0x6: /* download MC with offsets and ACT */
3784 /* set UAs on most devices (LUs) in this target */
3785 list_for_each_entry(dp,
3786 &devip->sdbg_host->dev_info_list,
3788 if (dp->target == sdp->id) {
3789 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3791 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3795 case 0x7: /* download MC with offsets, save, and ACT */
3796 /* set UA on all devices (LUs) in this target */
3797 list_for_each_entry(dp,
3798 &devip->sdbg_host->dev_info_list,
3800 if (dp->target == sdp->id)
3801 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3805 /* do nothing for this command for other mode values */
/* Handle COMPARE AND WRITE(32-like CDB, opcode 0x89 layout): fetch
 * 2*num blocks (verify data then write data) into a temporary buffer,
 * compare the first half against the store and, on match, write the
 * second half — all atomically under the store write lock.  A miscompare
 * yields MISCOMPARE sense. */
3811 static int resp_comp_write(struct scsi_cmnd *scp,
3812 struct sdebug_dev_info *devip)
3814 u8 *cmd = scp->cmnd;
3816 struct sdeb_store_info *sip = devip2sip(devip);
3817 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3820 u32 lb_size = sdebug_sector_size;
3825 lba = get_unaligned_be64(cmd + 2);
3826 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3828 return 0; /* degenerate case, not an error */
3829 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3831 mk_sense_invalid_opcode(scp);
3832 return check_condition_result;
3834 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3835 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3836 (cmd[1] & 0xe0) == 0)
3837 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3839 ret = check_device_access_params(scp, lba, num, false);
/* dnum = 2 * num: data-out carries verify blocks then write blocks */
3843 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3845 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3847 return check_condition_result;
/* write lock: compare and conditional write must appear atomic */
3850 write_lock(macc_lckp);
3852 ret = do_dout_fetch(scp, dnum, arr);
3854 retval = DID_ERROR << 16;
3856 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3857 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3858 "indicated=%u, IO sent=%d bytes\n", my_name,
3859 dnum * lb_size, ret);
3860 if (!comp_write_worker(sip, lba, num, arr, false)) {
3861 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3862 retval = check_condition_result;
3865 if (scsi_debug_lbp())
3866 map_region(sip, lba, num);
3868 write_unlock(macc_lckp);
3873 struct unmap_block_desc {
/* Handle UNMAP: copy the parameter list (header + 16-byte block
 * descriptors) out of the scatterlist, validate the descriptor count,
 * and unmap each described LBA range under the store write lock. */
3879 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3882 struct unmap_block_desc *desc;
3883 struct sdeb_store_info *sip = devip2sip(devip);
3884 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3885 unsigned int i, payload_len, descriptors;
3888 if (!scsi_debug_lbp())
3889 return 0; /* fib and say its done */
3890 payload_len = get_unaligned_be16(scp->cmnd + 7);
/* NOTE(review): BUG_ON on a length derived from the CDB — an initiator
 * sending a mismatched buffer length would crash the kernel; a sense
 * error return would be gentler.  Confirm whether the midlayer already
 * guarantees this equality. */
3891 BUG_ON(scsi_bufflen(scp) != payload_len);
/* 8-byte parameter list header, then 16 bytes per descriptor */
3893 descriptors = (payload_len - 8) / 16;
3894 if (descriptors > sdebug_unmap_max_desc) {
3895 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3896 return check_condition_result;
3899 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3901 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3903 return check_condition_result;
3906 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* sanity-check the lengths embedded in the parameter list header */
3908 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3909 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3911 desc = (void *)&buf[8];
3913 write_lock(macc_lckp);
3915 for (i = 0 ; i < descriptors ; i++) {
3916 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3917 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3919 ret = check_device_access_params(scp, lba, num, true);
3923 unmap_region(sip, lba, num);
3929 write_unlock(macc_lckp);
3935 #define SDEBUG_GET_LBA_STATUS_LEN 32
/* Handle GET LBA STATUS(16): report the provisioning state (mapped or
 * deallocated) of the run of blocks starting at the given LBA, in a
 * single 32-byte response containing one LBA status descriptor. */
3937 static int resp_get_lba_status(struct scsi_cmnd *scp,
3938 struct sdebug_dev_info *devip)
3940 u8 *cmd = scp->cmnd;
3941 struct sdeb_store_info *sip = devip2sip(devip);
3943 u32 alloc_len, mapped, num;
3945 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3947 lba = get_unaligned_be64(cmd + 2);
3948 alloc_len = get_unaligned_be32(cmd + 10);
3953 ret = check_device_access_params(scp, lba, 1, false);
/* with LBP active, consult the provisioning bitmap for run length */
3957 if (scsi_debug_lbp())
3958 mapped = map_state(sip, lba, &num);
3961 /* following just in case virtual_gb changed */
3962 sdebug_capacity = get_sdebug_capacity();
/* without LBP: whole remaining capacity counts as one mapped run */
3963 if (sdebug_capacity - lba <= 0xffffffff)
3964 num = sdebug_capacity - lba;
3969 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3970 put_unaligned_be32(20, arr); /* Parameter Data Length */
3971 put_unaligned_be64(lba, arr + 8); /* LBA */
3972 put_unaligned_be32(num, arr + 16); /* Number of blocks */
3973 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
3975 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
/* Handle SYNCHRONIZE CACHE(10/16): validate the range, then either
 * complete immediately (nothing written since last sync, or IMMED bit
 * set) or clear write_since_sync so the completion is delayed. */
3978 static int resp_sync_cache(struct scsi_cmnd *scp,
3979 struct sdebug_dev_info *devip)
3984 u8 *cmd = scp->cmnd;
3986 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
3987 lba = get_unaligned_be32(cmd + 2);
3988 num_blocks = get_unaligned_be16(cmd + 7);
3989 } else { /* SYNCHRONIZE_CACHE(16) */
3990 lba = get_unaligned_be64(cmd + 2);
3991 num_blocks = get_unaligned_be32(cmd + 10);
3993 if (lba + num_blocks > sdebug_capacity) {
3994 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3995 return check_condition_result;
/* IMMED bit (cmd[1] & 0x2) or clean cache => respond without delay */
3997 if (!write_since_sync || cmd[1] & 0x2)
3998 res = SDEG_RES_IMMED_MASK;
3999 else /* delay if write_since_sync and IMMED clear */
4000 write_since_sync = false;
4005 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4006 * CONDITION MET if the specified blocks will/have fitted in the cache, and
4007 * a GOOD status otherwise. Model a disk with a big cache and yield
4008 * CONDITION MET. Actually tries to bring range in main memory into the
4009 * cache associated with the CPU(s).
4011 static int resp_pre_fetch(struct scsi_cmnd *scp,
4012 struct sdebug_dev_info *devip)
4016 u64 block, rest = 0;
4018 u8 *cmd = scp->cmnd;
4019 struct sdeb_store_info *sip = devip2sip(devip);
4020 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
/* fsp may be NULL when there is no backing store (fake_rw mode) */
4021 u8 *fsp = sip ? sip->storep : NULL;
4023 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4024 lba = get_unaligned_be32(cmd + 2);
4025 nblks = get_unaligned_be16(cmd + 7);
4026 } else { /* PRE-FETCH(16) */
4027 lba = get_unaligned_be64(cmd + 2);
4028 nblks = get_unaligned_be32(cmd + 10);
4030 if (lba + nblks > sdebug_capacity) {
4031 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4032 return check_condition_result;
4036 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4037 block = do_div(lba, sdebug_store_sectors);
/* range may wrap past the end of the (smaller) backing store */
4038 if (block + nblks > sdebug_store_sectors)
4039 rest = block + nblks - sdebug_store_sectors;
4041 /* Try to bring the PRE-FETCH range into CPU's cache */
4042 read_lock(macc_lckp);
4043 prefetch_range(fsp + (sdebug_sector_size * block),
4044 (nblks - rest) * sdebug_sector_size);
/* prefetch the wrapped-around tail from the start of the store */
4046 prefetch_range(fsp, rest * sdebug_sector_size);
4047 read_unlock(macc_lckp);
/* IMMED bit set: complete immediately */
4050 res = SDEG_RES_IMMED_MASK;
4051 return res | condition_met_result;
4054 #define RL_BUCKET_ELEMS 8
4056 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4057 * (W-LUN), the normal Linux scanning logic does not associate it with a
4058 * device (e.g. /dev/sg7). The following magic will make that association:
4059 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4060 * where <n> is a host number. If there are multiple targets in a host then
4061 * the above will associate a W-LUN to each target. To only get a W-LUN
4062 * for target 2, then use "echo '- 2 49409' > scan" .
/* Handle REPORT LUNS: build the LUN list (normal LUNs, optionally the
 * REPORT LUNS W-LUN, or both, per the SELECT REPORT field) and stream
 * it to the initiator in RL_BUCKET_ELEMS-sized chunks so only a small
 * stack array is needed. */
4064 static int resp_report_luns(struct scsi_cmnd *scp,
4065 struct sdebug_dev_info *devip)
4067 unsigned char *cmd = scp->cmnd;
4068 unsigned int alloc_len;
4069 unsigned char select_report;
4071 struct scsi_lun *lun_p;
4072 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4073 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4074 unsigned int wlun_cnt; /* report luns W-LUN count */
4075 unsigned int tlun_cnt; /* total LUN count */
4076 unsigned int rlen; /* response length (in bytes) */
4078 unsigned int off_rsp = 0;
4079 const int sz_lun = sizeof(struct scsi_lun);
/* REPORT LUNS is the UA-clearing command for "luns changed" */
4081 clear_luns_changed_on_target(devip);
4083 select_report = cmd[2];
4084 alloc_len = get_unaligned_be32(cmd + 6);
/* SPC requires at least a 4-byte allocation length */
4086 if (alloc_len < 4) {
4087 pr_err("alloc len too small %d\n", alloc_len);
4088 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4089 return check_condition_result;
4092 switch (select_report) {
4093 case 0: /* all LUNs apart from W-LUNs */
4094 lun_cnt = sdebug_max_luns;
4097 case 1: /* only W-LUNs */
4101 case 2: /* all LUNs */
4102 lun_cnt = sdebug_max_luns;
4105 case 0x10: /* only administrative LUs */
4106 case 0x11: /* see SPC-5 */
4107 case 0x12: /* only subsiduary LUs owned by referenced LU */
4109 pr_debug("select report invalid %d\n", select_report);
4110 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4111 return check_condition_result;
/* no_lun_0: LUN numbering starts at 1, so one fewer normal LUN */
4114 if (sdebug_no_lun_0 && (lun_cnt > 0))
4117 tlun_cnt = lun_cnt + wlun_cnt;
4118 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4119 scsi_set_resid(scp, scsi_bufflen(scp));
4120 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4121 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4123 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4124 lun = sdebug_no_lun_0 ? 1 : 0;
/* k counts buckets; first bucket carries the 8-byte response header */
4125 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4126 memset(arr, 0, sizeof(arr));
4127 lun_p = (struct scsi_lun *)&arr[0];
4129 put_unaligned_be32(rlen, &arr[0]);
4133 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4134 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4136 int_to_scsilun(lun++, lun_p);
4138 if (j < RL_BUCKET_ELEMS)
/* partial-progress copy of this full bucket to the response */
4141 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
/* append the well-known REPORT LUNS W-LUN when requested */
4147 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4151 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
/* Handle VERIFY(10/16): for BYTCHK=1 compare the data-out buffer with
 * the store; for BYTCHK=3 one block is sent and compared against every
 * block in the range.  BYTCHK=0 (medium verify only) always succeeds;
 * BYTCHK=2 is reserved and rejected. */
4155 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4157 bool is_bytchk3 = false;
4160 u32 vnum, a_num, off;
4161 const u32 lb_size = sdebug_sector_size;
4164 u8 *cmd = scp->cmnd;
4165 struct sdeb_store_info *sip = devip2sip(devip);
4166 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4168 bytchk = (cmd[1] >> 1) & 0x3;
4170 return 0; /* always claim internal verify okay */
4171 } else if (bytchk == 2) {
4172 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4173 return check_condition_result;
4174 } else if (bytchk == 3) {
4175 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
/* VERIFY(16) layout */
4179 lba = get_unaligned_be64(cmd + 2);
4180 vnum = get_unaligned_be32(cmd + 10);
4182 case VERIFY: /* is VERIFY(10) */
4183 lba = get_unaligned_be32(cmd + 2);
4184 vnum = get_unaligned_be16(cmd + 7);
4187 mk_sense_invalid_opcode(scp);
4188 return check_condition_result;
/* BYTCHK=3 transfers only one block regardless of range length */
4190 a_num = is_bytchk3 ? 1 : vnum;
4191 /* Treat following check like one for read (i.e. no write) access */
4192 ret = check_device_access_params(scp, lba, a_num, false);
/* buffer sized for the whole range so bytchk3 can be expanded in place */
4196 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4198 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4200 return check_condition_result;
4202 /* Not changing store, so only need read access */
4203 read_lock(macc_lckp);
4205 ret = do_dout_fetch(scp, a_num, arr);
4207 ret = DID_ERROR << 16;
4209 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4210 sdev_printk(KERN_INFO, scp->device,
4211 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4212 my_name, __func__, a_num * lb_size, ret);
/* bytchk3: replicate the single received block across the range */
4215 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4216 memcpy(arr + off, arr, lb_size);
/* compare-only call (read_only=true): no store modification */
4219 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4220 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4221 ret = check_condition_result;
4225 read_unlock(macc_lckp);
4230 #define RZONES_DESC_HD 64
4232 /* Report zones depending on start LBA nad reporting options */
/* Handle REPORT ZONES (ZBC): walk the device's zone array from the
 * requested start LBA, filter zones per the reporting-options field,
 * and emit a 64-byte header plus 64-byte descriptors, truncated to the
 * allocation length.  Only valid on zoned pseudo devices. */
4233 static int resp_report_zones(struct scsi_cmnd *scp,
4234 struct sdebug_dev_info *devip)
4236 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4238 u32 alloc_len, rep_opts, rep_len;
4241 u8 *arr = NULL, *desc;
4242 u8 *cmd = scp->cmnd;
4243 struct sdeb_zone_state *zsp;
4244 struct sdeb_store_info *sip = devip2sip(devip);
4245 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4247 if (!sdebug_dev_is_zoned(devip)) {
4248 mk_sense_invalid_opcode(scp);
4249 return check_condition_result;
4251 zs_lba = get_unaligned_be64(cmd + 2);
4252 alloc_len = get_unaligned_be32(cmd + 10);
4253 rep_opts = cmd[14] & 0x3f;
4254 partial = cmd[14] & 0x80;
4256 if (zs_lba >= sdebug_capacity) {
4257 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4258 return check_condition_result;
/* zones remaining from the starting zone to the end of the device */
4261 max_zones = devip->nr_zones - zs_lba / devip->zsize;
/* how many 64-byte descriptors fit after the 64-byte header */
4262 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
/* NOTE(review): allocation is RZONES_DESC_HD * alloc_len where
 * alloc_len comes straight from the CDB — a large allocation length
 * drives a huge GFP_ATOMIC allocation; consider clamping.  Confirm. */
4265 arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4267 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4269 return check_condition_result;
4272 read_lock(macc_lckp);
4275 for (i = 0; i < max_zones; i++) {
4276 lba = zs_lba + devip->zsize * i;
4277 if (lba > sdebug_capacity)
4279 zsp = zbc_zone(devip, lba);
/* rep_opts filters: each case skips zones not in the wanted state */
4286 if (zsp->z_cond != ZC1_EMPTY)
4290 /* Implicit open zones */
4291 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4295 /* Explicit open zones */
4296 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4301 if (zsp->z_cond != ZC4_CLOSED)
4306 if (zsp->z_cond != ZC5_FULL)
4314 * Read-only, offline, reset WP recommended and
4315 * non-seq-resource-used are not emulated: no zones
4320 /* Not write pointer (conventional) zones */
4321 if (!zbc_zone_is_conv(zsp))
/* unsupported reporting option value */
4325 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4326 INVALID_FIELD_IN_CDB, 0);
4327 ret = check_condition_result;
4331 if (nrz < rep_max_zones) {
4332 /* Fill zone descriptor */
4333 if (zbc_zone_is_conv(zsp))
4337 desc[1] = zsp->z_cond << 4;
4338 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4339 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4340 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
/* PARTIAL bit: stop counting once the buffer is full */
4344 if (partial && nrz >= rep_max_zones)
/* header: zone list length and maximum LBA */
4351 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4352 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4354 rep_len = (unsigned long)desc - (unsigned long)arr;
4355 ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4358 read_unlock(macc_lckp);
4363 /* Logic transplanted from tcmu-runner, file_zbc.c */
/* Explicitly open every closed zone (OPEN ZONE with the ALL bit). */
4364 static void zbc_open_all(struct sdebug_dev_info *devip)
4366 struct sdeb_zone_state *zsp = &devip->zstate[0];
4369 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4370 if (zsp->z_cond == ZC4_CLOSED)
4371 zbc_open_zone(devip, &devip->zstate[i], true);
/* Handle OPEN ZONE (ZBC): with the ALL bit, explicitly open every
 * closed zone (after checking the max_open budget); otherwise validate
 * the zone-start LBA and explicitly open that one zone.  Runs under
 * the store write lock. */
4375 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4379 enum sdebug_z_cond zc;
4380 u8 *cmd = scp->cmnd;
4381 struct sdeb_zone_state *zsp;
4382 bool all = cmd[14] & 0x01;
4383 struct sdeb_store_info *sip = devip2sip(devip);
4384 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4386 if (!sdebug_dev_is_zoned(devip)) {
4387 mk_sense_invalid_opcode(scp);
4388 return check_condition_result;
4391 write_lock(macc_lckp);
4394 /* Check if all closed zones can be open */
4395 if (devip->max_open &&
4396 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4397 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4399 res = check_condition_result;
4402 /* Open all closed zones */
4403 zbc_open_all(devip);
4407 /* Open the specified zone */
4408 z_id = get_unaligned_be64(cmd + 2);
4409 if (z_id >= sdebug_capacity) {
4410 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4411 res = check_condition_result;
4415 zsp = zbc_zone(devip, z_id);
/* the zone id must be the zone's first LBA */
4416 if (z_id != zsp->z_start) {
4417 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4418 res = check_condition_result;
/* conventional zones cannot be opened */
4421 if (zbc_zone_is_conv(zsp)) {
4422 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4423 res = check_condition_result;
/* already explicitly open or full: nothing to do */
4428 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4431 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4432 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4434 res = check_condition_result;
/* implicit-open converts to explicit-open: close first, reopen */
4438 if (zc == ZC2_IMPLICIT_OPEN)
4439 zbc_close_zone(devip, zsp);
4440 zbc_open_zone(devip, zsp, true);
4442 write_unlock(macc_lckp);
/* Close every zone (CLOSE ZONE with the ALL bit); zbc_close_zone()
 * itself skips zones that are not open. */
4446 static void zbc_close_all(struct sdebug_dev_info *devip)
4450 for (i = 0; i < devip->nr_zones; i++)
4451 zbc_close_zone(devip, &devip->zstate[i]);
/* Handle CLOSE ZONE (ZBC): close all zones when the ALL bit is set,
 * otherwise validate the zone-start LBA and close that zone.  Runs
 * under the store write lock. */
4454 static int resp_close_zone(struct scsi_cmnd *scp,
4455 struct sdebug_dev_info *devip)
4459 u8 *cmd = scp->cmnd;
4460 struct sdeb_zone_state *zsp;
4461 bool all = cmd[14] & 0x01;
4462 struct sdeb_store_info *sip = devip2sip(devip);
4463 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4465 if (!sdebug_dev_is_zoned(devip)) {
4466 mk_sense_invalid_opcode(scp);
4467 return check_condition_result;
4470 write_lock(macc_lckp);
4473 zbc_close_all(devip);
4477 /* Close specified zone */
4478 z_id = get_unaligned_be64(cmd + 2);
4479 if (z_id >= sdebug_capacity) {
4480 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4481 res = check_condition_result;
4485 zsp = zbc_zone(devip, z_id);
/* the zone id must be the zone's first LBA */
4486 if (z_id != zsp->z_start) {
4487 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4488 res = check_condition_result;
/* conventional zones have no open/close state */
4491 if (zbc_zone_is_conv(zsp)) {
4492 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4493 res = check_condition_result;
4497 zbc_close_zone(devip, zsp);
4499 write_unlock(macc_lckp);
/* Transition a zone to FULL: close it first if open, move the write
 * pointer to the end of the zone and set ZC5_FULL.  @empty controls
 * whether an EMPTY zone may also be finished (true for the explicit
 * FINISH ZONE command, false for finish-all). */
4503 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4504 struct sdeb_zone_state *zsp, bool empty)
4506 enum sdebug_z_cond zc = zsp->z_cond;
4508 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4509 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4510 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4511 zbc_close_zone(devip, zsp);
4512 if (zsp->z_cond == ZC4_CLOSED)
/* write pointer at one past the last LBA marks the zone full */
4514 zsp->z_wp = zsp->z_start + zsp->z_size;
4515 zsp->z_cond = ZC5_FULL;
/* Finish every zone (FINISH ZONE with the ALL bit); empty zones are
 * left empty (empty=false). */
4519 static void zbc_finish_all(struct sdebug_dev_info *devip)
4523 for (i = 0; i < devip->nr_zones; i++)
4524 zbc_finish_zone(devip, &devip->zstate[i], false);
/* Handle FINISH ZONE (ZBC): finish all zones when the ALL bit is set,
 * otherwise validate the zone-start LBA and finish that zone (an empty
 * zone may be finished in the single-zone case).  Runs under the store
 * write lock. */
4527 static int resp_finish_zone(struct scsi_cmnd *scp,
4528 struct sdebug_dev_info *devip)
4530 struct sdeb_zone_state *zsp;
4533 u8 *cmd = scp->cmnd;
4534 bool all = cmd[14] & 0x01;
4535 struct sdeb_store_info *sip = devip2sip(devip);
4536 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4538 if (!sdebug_dev_is_zoned(devip)) {
4539 mk_sense_invalid_opcode(scp);
4540 return check_condition_result;
4543 write_lock(macc_lckp);
4546 zbc_finish_all(devip);
4550 /* Finish the specified zone */
4551 z_id = get_unaligned_be64(cmd + 2);
4552 if (z_id >= sdebug_capacity) {
4553 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4554 res = check_condition_result;
4558 zsp = zbc_zone(devip, z_id);
/* the zone id must be the zone's first LBA */
4559 if (z_id != zsp->z_start) {
4560 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4561 res = check_condition_result;
/* conventional zones cannot be finished */
4564 if (zbc_zone_is_conv(zsp)) {
4565 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4566 res = check_condition_result;
4570 zbc_finish_zone(devip, zsp, true);
4572 write_unlock(macc_lckp);
4576 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4577 struct sdeb_zone_state *zsp)
4579 enum sdebug_z_cond zc;
4581 if (zbc_zone_is_conv(zsp))
4585 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4586 zbc_close_zone(devip, zsp);
4588 if (zsp->z_cond == ZC4_CLOSED)
4591 zsp->z_wp = zsp->z_start;
4592 zsp->z_cond = ZC1_EMPTY;
4595 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4599 for (i = 0; i < devip->nr_zones; i++)
4600 zbc_rwp_zone(devip, &devip->zstate[i]);
4603 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4605 struct sdeb_zone_state *zsp;
4608 u8 *cmd = scp->cmnd;
4609 bool all = cmd[14] & 0x01;
4610 struct sdeb_store_info *sip = devip2sip(devip);
4611 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4613 if (!sdebug_dev_is_zoned(devip)) {
4614 mk_sense_invalid_opcode(scp);
4615 return check_condition_result;
4618 write_lock(macc_lckp);
4625 z_id = get_unaligned_be64(cmd + 2);
4626 if (z_id >= sdebug_capacity) {
4627 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4628 res = check_condition_result;
4632 zsp = zbc_zone(devip, z_id);
4633 if (z_id != zsp->z_start) {
4634 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4635 res = check_condition_result;
4638 if (zbc_zone_is_conv(zsp)) {
4639 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4640 res = check_condition_result;
4644 zbc_rwp_zone(devip, zsp);
4646 write_unlock(macc_lckp);
4650 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4652 u32 tag = blk_mq_unique_tag(cmnd->request);
4653 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
4655 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4656 if (WARN_ON_ONCE(hwq >= submit_queues))
4658 return sdebug_q_arr + hwq;
4661 /* Queued (deferred) command completions converge here. */
4662 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4664 bool aborted = sd_dp->aborted;
4667 unsigned long iflags;
4668 struct sdebug_queue *sqp;
4669 struct sdebug_queued_cmd *sqcp;
4670 struct scsi_cmnd *scp;
4671 struct sdebug_dev_info *devip;
4673 sd_dp->defer_t = SDEB_DEFER_NONE;
4674 if (unlikely(aborted))
4675 sd_dp->aborted = false;
4676 qc_idx = sd_dp->qc_idx;
4677 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4678 if (sdebug_statistics) {
4679 atomic_inc(&sdebug_completions);
4680 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4681 atomic_inc(&sdebug_miss_cpus);
4683 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4684 pr_err("wild qc_idx=%d\n", qc_idx);
4687 spin_lock_irqsave(&sqp->qc_lock, iflags);
4688 sqcp = &sqp->qc_arr[qc_idx];
4690 if (unlikely(scp == NULL)) {
4691 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4692 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
4693 sd_dp->sqa_idx, qc_idx);
4696 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4698 atomic_dec(&devip->num_in_q);
4700 pr_err("devip=NULL\n");
4701 if (unlikely(atomic_read(&retired_max_queue) > 0))
4704 sqcp->a_cmnd = NULL;
4705 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4706 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4707 pr_err("Unexpected completion\n");
4711 if (unlikely(retiring)) { /* user has reduced max_queue */
4714 retval = atomic_read(&retired_max_queue);
4715 if (qc_idx >= retval) {
4716 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4717 pr_err("index %d too large\n", retval);
4720 k = find_last_bit(sqp->in_use_bm, retval);
4721 if ((k < sdebug_max_queue) || (k == retval))
4722 atomic_set(&retired_max_queue, 0);
4724 atomic_set(&retired_max_queue, k + 1);
4726 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4727 if (unlikely(aborted)) {
4729 pr_info("bypassing scsi_done() due to aborted cmd\n");
4732 scp->scsi_done(scp); /* callback to mid level */
4735 /* When high resolution timer goes off this function is called. */
4736 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4738 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4740 sdebug_q_cmd_complete(sd_dp);
4741 return HRTIMER_NORESTART;
4744 /* When work queue schedules work, it calls this function. */
4745 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4747 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4749 sdebug_q_cmd_complete(sd_dp);
4752 static bool got_shared_uuid;
4753 static uuid_t shared_uuid;
4755 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4757 struct sdeb_zone_state *zsp;
4758 sector_t capacity = get_sdebug_capacity();
4759 sector_t zstart = 0;
4763 * Set the zone size: if zbc_zone_size_mb is not set, figure out a
4764 * zone size allowing for at least 4 zones on the device. Otherwise,
4765 * use the specified zone size checking that at least 2 zones can be
4766 * created for the device.
4768 if (!zbc_zone_size_mb) {
4769 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4770 >> ilog2(sdebug_sector_size);
4771 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4773 if (devip->zsize < 2) {
4774 pr_err("Device capacity too small\n");
4778 devip->zsize = (zbc_zone_size_mb * SZ_1M)
4779 >> ilog2(sdebug_sector_size);
4780 if (devip->zsize >= capacity) {
4781 pr_err("Zone size too large for device capacity\n");
4786 if (is_power_of_2(devip->zsize))
4787 devip->zsize_shift = ilog2(devip->zsize);
4788 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4790 /* zbc_max_open_zones can be 0, meaning "not reported" (no limit) */
4791 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4792 devip->max_open = (devip->nr_zones - 1) / 2;
4794 devip->max_open = sdeb_zbc_max_open;
4796 devip->zstate = kcalloc(devip->nr_zones,
4797 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4801 for (i = 0; i < devip->nr_zones; i++) {
4802 zsp = &devip->zstate[i];
4804 zsp->z_start = zstart;
4807 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4808 zsp->z_wp = (sector_t)-1;
4810 zsp->z_cond = ZC1_EMPTY;
4811 zsp->z_wp = zsp->z_start;
4814 if (zsp->z_start + devip->zsize < capacity)
4815 zsp->z_size = devip->zsize;
4817 zsp->z_size = capacity - zsp->z_start;
4819 zstart += zsp->z_size;
4825 static struct sdebug_dev_info *sdebug_device_create(
4826 struct sdebug_host_info *sdbg_host, gfp_t flags)
4828 struct sdebug_dev_info *devip;
4830 devip = kzalloc(sizeof(*devip), flags);
4832 if (sdebug_uuid_ctl == 1)
4833 uuid_gen(&devip->lu_name);
4834 else if (sdebug_uuid_ctl == 2) {
4835 if (got_shared_uuid)
4836 devip->lu_name = shared_uuid;
4838 uuid_gen(&shared_uuid);
4839 got_shared_uuid = true;
4840 devip->lu_name = shared_uuid;
4843 devip->sdbg_host = sdbg_host;
4844 if (sdeb_zbc_in_use) {
4845 if (sdebug_device_create_zones(devip)) {
4850 devip->sdbg_host = sdbg_host;
4851 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4856 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4858 struct sdebug_host_info *sdbg_host;
4859 struct sdebug_dev_info *open_devip = NULL;
4860 struct sdebug_dev_info *devip;
4862 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4864 pr_err("Host info NULL\n");
4867 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4868 if ((devip->used) && (devip->channel == sdev->channel) &&
4869 (devip->target == sdev->id) &&
4870 (devip->lun == sdev->lun))
4873 if ((!devip->used) && (!open_devip))
4877 if (!open_devip) { /* try and make a new one */
4878 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4880 pr_err("out of memory at line %d\n", __LINE__);
4885 open_devip->channel = sdev->channel;
4886 open_devip->target = sdev->id;
4887 open_devip->lun = sdev->lun;
4888 open_devip->sdbg_host = sdbg_host;
4889 atomic_set(&open_devip->num_in_q, 0);
4890 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4891 open_devip->used = true;
4895 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4898 pr_info("slave_alloc <%u %u %u %llu>\n",
4899 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4903 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4905 struct sdebug_dev_info *devip =
4906 (struct sdebug_dev_info *)sdp->hostdata;
4909 pr_info("slave_configure <%u %u %u %llu>\n",
4910 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4911 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4912 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4913 if (devip == NULL) {
4914 devip = find_build_dev_info(sdp);
4916 return 1; /* no resources, will be marked offline */
4918 sdp->hostdata = devip;
4920 sdp->no_uld_attach = 1;
4921 config_cdb_len(sdp);
4925 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
4927 struct sdebug_dev_info *devip =
4928 (struct sdebug_dev_info *)sdp->hostdata;
4931 pr_info("slave_destroy <%u %u %u %llu>\n",
4932 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4934 /* make this slot available for re-use */
4935 devip->used = false;
4936 sdp->hostdata = NULL;
4940 static void stop_qc_helper(struct sdebug_defer *sd_dp,
4941 enum sdeb_defer_type defer_t)
4945 if (defer_t == SDEB_DEFER_HRT)
4946 hrtimer_cancel(&sd_dp->hrt);
4947 else if (defer_t == SDEB_DEFER_WQ)
4948 cancel_work_sync(&sd_dp->ew.work);
4951 /* If @cmnd found deletes its timer or work queue and returns true; else
4953 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
4955 unsigned long iflags;
4956 int j, k, qmax, r_qmax;
4957 enum sdeb_defer_type l_defer_t;
4958 struct sdebug_queue *sqp;
4959 struct sdebug_queued_cmd *sqcp;
4960 struct sdebug_dev_info *devip;
4961 struct sdebug_defer *sd_dp;
4963 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4964 spin_lock_irqsave(&sqp->qc_lock, iflags);
4965 qmax = sdebug_max_queue;
4966 r_qmax = atomic_read(&retired_max_queue);
4969 for (k = 0; k < qmax; ++k) {
4970 if (test_bit(k, sqp->in_use_bm)) {
4971 sqcp = &sqp->qc_arr[k];
4972 if (cmnd != sqcp->a_cmnd)
4975 devip = (struct sdebug_dev_info *)
4976 cmnd->device->hostdata;
4978 atomic_dec(&devip->num_in_q);
4979 sqcp->a_cmnd = NULL;
4980 sd_dp = sqcp->sd_dp;
4982 l_defer_t = sd_dp->defer_t;
4983 sd_dp->defer_t = SDEB_DEFER_NONE;
4985 l_defer_t = SDEB_DEFER_NONE;
4986 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4987 stop_qc_helper(sd_dp, l_defer_t);
4988 clear_bit(k, sqp->in_use_bm);
4992 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4997 /* Deletes (stops) timers or work queues of all queued commands */
4998 static void stop_all_queued(void)
5000 unsigned long iflags;
5002 enum sdeb_defer_type l_defer_t;
5003 struct sdebug_queue *sqp;
5004 struct sdebug_queued_cmd *sqcp;
5005 struct sdebug_dev_info *devip;
5006 struct sdebug_defer *sd_dp;
5008 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5009 spin_lock_irqsave(&sqp->qc_lock, iflags);
5010 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5011 if (test_bit(k, sqp->in_use_bm)) {
5012 sqcp = &sqp->qc_arr[k];
5013 if (sqcp->a_cmnd == NULL)
5015 devip = (struct sdebug_dev_info *)
5016 sqcp->a_cmnd->device->hostdata;
5018 atomic_dec(&devip->num_in_q);
5019 sqcp->a_cmnd = NULL;
5020 sd_dp = sqcp->sd_dp;
5022 l_defer_t = sd_dp->defer_t;
5023 sd_dp->defer_t = SDEB_DEFER_NONE;
5025 l_defer_t = SDEB_DEFER_NONE;
5026 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5027 stop_qc_helper(sd_dp, l_defer_t);
5028 clear_bit(k, sqp->in_use_bm);
5029 spin_lock_irqsave(&sqp->qc_lock, iflags);
5032 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5036 /* Free queued command memory on heap */
5037 static void free_all_queued(void)
5040 struct sdebug_queue *sqp;
5041 struct sdebug_queued_cmd *sqcp;
5043 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5044 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5045 sqcp = &sqp->qc_arr[k];
5052 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5058 ok = stop_queued_cmnd(SCpnt);
5059 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5060 sdev_printk(KERN_INFO, SCpnt->device,
5061 "%s: command%s found\n", __func__,
5067 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5070 if (SCpnt && SCpnt->device) {
5071 struct scsi_device *sdp = SCpnt->device;
5072 struct sdebug_dev_info *devip =
5073 (struct sdebug_dev_info *)sdp->hostdata;
5075 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5076 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5078 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5083 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5085 struct sdebug_host_info *sdbg_host;
5086 struct sdebug_dev_info *devip;
5087 struct scsi_device *sdp;
5088 struct Scsi_Host *hp;
5091 ++num_target_resets;
5094 sdp = SCpnt->device;
5097 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5098 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5102 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5104 list_for_each_entry(devip,
5105 &sdbg_host->dev_info_list,
5107 if (devip->target == sdp->id) {
5108 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5112 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5113 sdev_printk(KERN_INFO, sdp,
5114 "%s: %d device(s) found in target\n", __func__, k);
5119 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5121 struct sdebug_host_info *sdbg_host;
5122 struct sdebug_dev_info *devip;
5123 struct scsi_device *sdp;
5124 struct Scsi_Host *hp;
5128 if (!(SCpnt && SCpnt->device))
5130 sdp = SCpnt->device;
5131 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5132 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5135 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5137 list_for_each_entry(devip,
5138 &sdbg_host->dev_info_list,
5140 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5145 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5146 sdev_printk(KERN_INFO, sdp,
5147 "%s: %d device(s) found in host\n", __func__, k);
5152 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5154 struct sdebug_host_info *sdbg_host;
5155 struct sdebug_dev_info *devip;
5159 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5160 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5161 spin_lock(&sdebug_host_list_lock);
5162 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5163 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5165 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5169 spin_unlock(&sdebug_host_list_lock);
5171 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5172 sdev_printk(KERN_INFO, SCpnt->device,
5173 "%s: %d device(s) found\n", __func__, k);
5177 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5179 struct msdos_partition *pp;
5180 int starts[SDEBUG_MAX_PARTS + 2];
5181 int sectors_per_part, num_sectors, k;
5182 int heads_by_sects, start_sec, end_sec;
5184 /* assume partition table already zeroed */
5185 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5187 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5188 sdebug_num_parts = SDEBUG_MAX_PARTS;
5189 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5191 num_sectors = (int)sdebug_store_sectors;
5192 sectors_per_part = (num_sectors - sdebug_sectors_per)
5194 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5195 starts[0] = sdebug_sectors_per;
5196 for (k = 1; k < sdebug_num_parts; ++k)
5197 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5199 starts[sdebug_num_parts] = num_sectors;
5200 starts[sdebug_num_parts + 1] = 0;
5202 ramp[510] = 0x55; /* magic partition markings */
5204 pp = (struct msdos_partition *)(ramp + 0x1be);
5205 for (k = 0; starts[k + 1]; ++k, ++pp) {
5206 start_sec = starts[k];
5207 end_sec = starts[k + 1] - 1;
5210 pp->cyl = start_sec / heads_by_sects;
5211 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5212 / sdebug_sectors_per;
5213 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5215 pp->end_cyl = end_sec / heads_by_sects;
5216 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5217 / sdebug_sectors_per;
5218 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5220 pp->start_sect = cpu_to_le32(start_sec);
5221 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5222 pp->sys_ind = 0x83; /* plain Linux partition */
5226 static void block_unblock_all_queues(bool block)
5229 struct sdebug_queue *sqp;
5231 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5232 atomic_set(&sqp->blocked, (int)block);
5235 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5236 * commands will be processed normally before triggers occur.
5238 static void tweak_cmnd_count(void)
5242 modulo = abs(sdebug_every_nth);
5245 block_unblock_all_queues(true);
5246 count = atomic_read(&sdebug_cmnd_count);
5247 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5248 block_unblock_all_queues(false);
5251 static void clear_queue_stats(void)
5253 atomic_set(&sdebug_cmnd_count, 0);
5254 atomic_set(&sdebug_completions, 0);
5255 atomic_set(&sdebug_miss_cpus, 0);
5256 atomic_set(&sdebug_a_tsf, 0);
5259 static void setup_inject(struct sdebug_queue *sqp,
5260 struct sdebug_queued_cmd *sqcp)
5262 if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
5263 if (sdebug_every_nth > 0)
5264 sqcp->inj_recovered = sqcp->inj_transport
5266 = sqcp->inj_dix = sqcp->inj_short
5267 = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
5270 sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
5271 sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
5272 sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
5273 sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
5274 sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
5275 sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
5276 sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
5279 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5281 /* Complete the processing of the thread that queued a SCSI command to this
5282 * driver. It either completes the command by calling cmnd_done() or
5283 * schedules a hr timer or work queue then returns 0. Returns
5284 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5286 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5288 int (*pfp)(struct scsi_cmnd *,
5289 struct sdebug_dev_info *),
5290 int delta_jiff, int ndelay)
5293 int k, num_in_q, qdepth, inject;
5294 unsigned long iflags;
5295 u64 ns_from_boot = 0;
5296 struct sdebug_queue *sqp;
5297 struct sdebug_queued_cmd *sqcp;
5298 struct scsi_device *sdp;
5299 struct sdebug_defer *sd_dp;
5301 if (unlikely(devip == NULL)) {
5302 if (scsi_result == 0)
5303 scsi_result = DID_NO_CONNECT << 16;
5304 goto respond_in_thread;
5308 if (delta_jiff == 0)
5309 goto respond_in_thread;
5311 sqp = get_queue(cmnd);
5312 spin_lock_irqsave(&sqp->qc_lock, iflags);
5313 if (unlikely(atomic_read(&sqp->blocked))) {
5314 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5315 return SCSI_MLQUEUE_HOST_BUSY;
5317 num_in_q = atomic_read(&devip->num_in_q);
5318 qdepth = cmnd->device->queue_depth;
5320 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5322 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5323 goto respond_in_thread;
5325 scsi_result = device_qfull_result;
5326 } else if (unlikely(sdebug_every_nth &&
5327 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5328 (scsi_result == 0))) {
5329 if ((num_in_q == (qdepth - 1)) &&
5330 (atomic_inc_return(&sdebug_a_tsf) >=
5331 abs(sdebug_every_nth))) {
5332 atomic_set(&sdebug_a_tsf, 0);
5334 scsi_result = device_qfull_result;
5338 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5339 if (unlikely(k >= sdebug_max_queue)) {
5340 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5342 goto respond_in_thread;
5343 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5344 scsi_result = device_qfull_result;
5345 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5346 sdev_printk(KERN_INFO, sdp,
5347 "%s: max_queue=%d exceeded, %s\n",
5348 __func__, sdebug_max_queue,
5349 (scsi_result ? "status: TASK SET FULL" :
5350 "report: host busy"));
5352 goto respond_in_thread;
5354 return SCSI_MLQUEUE_HOST_BUSY;
5356 __set_bit(k, sqp->in_use_bm);
5357 atomic_inc(&devip->num_in_q);
5358 sqcp = &sqp->qc_arr[k];
5359 sqcp->a_cmnd = cmnd;
5360 cmnd->host_scribble = (unsigned char *)sqcp;
5361 sd_dp = sqcp->sd_dp;
5362 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5363 if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
5364 setup_inject(sqp, sqcp);
5365 if (sd_dp == NULL) {
5366 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5368 return SCSI_MLQUEUE_HOST_BUSY;
5374 if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5375 ns_from_boot = ktime_get_boottime_ns();
5377 /* one of the resp_*() response functions is called here */
5378 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5379 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5380 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5381 delta_jiff = ndelay = 0;
5383 if (cmnd->result == 0 && scsi_result != 0)
5384 cmnd->result = scsi_result;
5386 if (unlikely(sdebug_verbose && cmnd->result))
5387 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5388 __func__, cmnd->result);
5390 if (delta_jiff > 0 || ndelay > 0) {
5393 if (delta_jiff > 0) {
5394 u64 ns = jiffies_to_nsecs(delta_jiff);
5396 if (sdebug_random && ns < U32_MAX) {
5397 ns = prandom_u32_max((u32)ns);
5398 } else if (sdebug_random) {
5399 ns >>= 12; /* scale to 4 usec precision */
5400 if (ns < U32_MAX) /* over 4 hours max */
5401 ns = prandom_u32_max((u32)ns);
5404 kt = ns_to_ktime(ns);
5405 } else { /* ndelay has a 4.2 second max */
5406 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5408 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5409 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5411 if (kt <= d) { /* elapsed duration >= kt */
5412 sqcp->a_cmnd = NULL;
5413 atomic_dec(&devip->num_in_q);
5414 clear_bit(k, sqp->in_use_bm);
5417 /* call scsi_done() from this thread */
5418 cmnd->scsi_done(cmnd);
5421 /* otherwise reduce kt by elapsed time */
5425 if (!sd_dp->init_hrt) {
5426 sd_dp->init_hrt = true;
5427 sqcp->sd_dp = sd_dp;
5428 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5429 HRTIMER_MODE_REL_PINNED);
5430 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5431 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5434 if (sdebug_statistics)
5435 sd_dp->issuing_cpu = raw_smp_processor_id();
5436 sd_dp->defer_t = SDEB_DEFER_HRT;
5437 /* schedule the invocation of scsi_done() for a later time */
5438 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5439 } else { /* jdelay < 0, use work queue */
5440 if (!sd_dp->init_wq) {
5441 sd_dp->init_wq = true;
5442 sqcp->sd_dp = sd_dp;
5443 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5445 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5447 if (sdebug_statistics)
5448 sd_dp->issuing_cpu = raw_smp_processor_id();
5449 sd_dp->defer_t = SDEB_DEFER_WQ;
5450 if (unlikely(sqcp->inj_cmd_abort))
5451 sd_dp->aborted = true;
5452 schedule_work(&sd_dp->ew.work);
5453 if (unlikely(sqcp->inj_cmd_abort)) {
5454 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5455 cmnd->request->tag);
5456 blk_abort_request(cmnd->request);
5459 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
5460 (scsi_result == device_qfull_result)))
5461 sdev_printk(KERN_INFO, sdp,
5462 "%s: num_in_q=%d +1, %s%s\n", __func__,
5463 num_in_q, (inject ? "<inject> " : ""),
5464 "status: TASK SET FULL");
5467 respond_in_thread: /* call back to mid-layer using invocation thread */
5468 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5469 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5470 if (cmnd->result == 0 && scsi_result != 0)
5471 cmnd->result = scsi_result;
5472 cmnd->scsi_done(cmnd);
5476 /* Note: The following macros create attribute files in the
5477 /sys/module/scsi_debug/parameters directory. Unfortunately this
5478 driver is unaware of a change and cannot trigger auxiliary actions
5479 as it can when the corresponding attribute in the
5480 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5482 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5483 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5484 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5485 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5486 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5487 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5488 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5489 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5490 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5491 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5492 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5493 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5494 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5495 module_param_string(inq_product, sdebug_inq_product_id,
5496 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5497 module_param_string(inq_rev, sdebug_inq_product_rev,
5498 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5499 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5500 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5501 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5502 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5503 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5504 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5505 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5506 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5507 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5508 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5510 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5512 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5513 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5514 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5515 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5516 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5517 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5518 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5519 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5520 module_param_named(per_host_store, sdebug_per_host_store, bool,
5522 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5523 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5524 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5525 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5526 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5527 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5528 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5529 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5530 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5531 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5532 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5533 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5534 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5535 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5536 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5537 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5539 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5540 module_param_named(write_same_length, sdebug_write_same_length, int,
5542 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5543 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5545 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5546 MODULE_DESCRIPTION("SCSI debug adapter driver");
5547 MODULE_LICENSE("GPL");
5548 MODULE_VERSION(SDEBUG_VERSION);
5550 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5551 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5552 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5553 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5554 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5555 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5556 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5557 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5558 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5559 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5560 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5561 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5562 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5563 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5564 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5565 SDEBUG_VERSION "\")");
5566 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5567 MODULE_PARM_DESC(lbprz,
5568 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5569 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
/* Human-readable descriptions for the module parameters (shown by modinfo).
 * Fix: the no_uld description had an unbalanced trailing parenthesis. */
5570 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5571 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5572 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5573 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5574 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5575 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5576 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5577 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5578 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5579 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5580 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5581 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5582 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5583 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5584 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5585 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5586 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5587 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5588 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5589 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5590 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5591 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5592 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5593 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5594 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5595 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5596 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5597 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5598 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5599 MODULE_PARM_DESC(uuid_ctl,
5600 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5601 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5602 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5603 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5604 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5605 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5606 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
/* Static buffer filled and returned by scsi_debug_info() below; shared by
 * all simulated hosts (the text does not vary per host). */
5608 #define SDEBUG_INFO_LEN 256
5609 static char sdebug_info[SDEBUG_INFO_LEN];
/* scsi_host_template .info handler: format a one-line driver description
 * (version, date, then key settings) into the shared sdebug_info buffer.
 * The second scnprintf() is skipped when the first already filled the
 * buffer. NOTE(review): declarations/braces elided in this view. */
5611 static const char *scsi_debug_info(struct Scsi_Host *shp)
5615 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5616 my_name, SDEBUG_VERSION, sdebug_version_date);
5617 if (k >= (SDEBUG_INFO_LEN - 1))
5619 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5620 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5621 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5622 "statistics", (int)sdebug_statistics);
5626 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
/* Parse a decimal value from the user buffer (at most 15 bytes are copied)
 * and update sdebug_opts-derived state. Requires both CAP_SYS_ADMIN and
 * CAP_SYS_RAWIO. The derived flags sdebug_verbose and
 * sdebug_any_injecting_opt are recomputed from the new opts value. */
5627 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5632 int minLen = length > 15 ? 15 : length;
5634 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5636 memcpy(arr, buffer, minLen);
5638 if (1 != sscanf(arr, "%d", &opts))
5641 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5642 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5643 if (sdebug_every_nth != 0)
5648 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5649 * same for each scsi_debug host (if more than one). Some of the counters
5650 * output are not atomics so might be inaccurate in a busy system. */
5651 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5654 struct sdebug_queue *sqp;
5655 struct sdebug_host_info *sdhp;
/* Global settings and counters first ... */
5657 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5658 SDEBUG_VERSION, sdebug_version_date);
5659 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5660 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5661 sdebug_opts, sdebug_every_nth);
5662 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5663 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5664 sdebug_sector_size, "bytes");
5665 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5666 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5668 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5669 num_dev_resets, num_target_resets, num_bus_resets,
5671 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5672 dix_reads, dix_writes, dif_errors);
5673 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5675 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5676 atomic_read(&sdebug_cmnd_count),
5677 atomic_read(&sdebug_completions),
5678 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5679 atomic_read(&sdebug_a_tsf));
/* ... then per-submit-queue busy-bitmap state ... */
5681 seq_printf(m, "submit_queues=%d\n", submit_queues);
5682 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5683 seq_printf(m, " queue %d:\n", j);
5684 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5685 if (f != sdebug_max_queue) {
5686 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5687 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5688 "first,last bits", f, l);
/* ... and finally the host list plus the per_store xarray, flagging
 * entries marked SDEB_XA_NOT_IN_USE. */
5692 seq_printf(m, "this host_no=%d\n", host->host_no);
5693 if (!xa_empty(per_store_ap)) {
5696 unsigned long l_idx;
5697 struct sdeb_store_info *sip;
5699 seq_puts(m, "\nhost list:\n");
5701 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5703 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5704 sdhp->shost->host_no, idx);
5707 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5708 sdeb_most_recent_idx);
5710 xa_for_each(per_store_ap, l_idx, sip) {
5711 niu = xa_get_mark(per_store_ap, l_idx,
5712 SDEB_XA_NOT_IN_USE);
5714 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5715 (niu ? " not_in_use" : ""));
/* sysfs 'delay' attribute: command response delay in jiffies. */
5722 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5724 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5726 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5727 * of delay is jiffies.
/* Store: refuse the change (-EBUSY) if any submit queue still has commands
 * in flight; queues are blocked around the scan so the check is stable. */
5729 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5734 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5736 if (sdebug_jdelay != jdelay) {
5738 struct sdebug_queue *sqp;
5740 block_unblock_all_queues(true);
5741 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5743 k = find_first_bit(sqp->in_use_bm,
5745 if (k != sdebug_max_queue) {
5746 res = -EBUSY; /* queued commands */
5751 sdebug_jdelay = jdelay;
5754 block_unblock_all_queues(false);
5760 static DRIVER_ATTR_RW(delay);
/* sysfs 'ndelay' attribute: response delay in nanoseconds (< 1 second). */
5762 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5764 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5766 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5767 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5768 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5773 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5774 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5776 if (sdebug_ndelay != ndelay) {
5778 struct sdebug_queue *sqp;
/* Same in-flight-command guard as delay_store(). */
5780 block_unblock_all_queues(true);
5781 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5783 k = find_first_bit(sqp->in_use_bm,
5785 if (k != sdebug_max_queue) {
5786 res = -EBUSY; /* queued commands */
5791 sdebug_ndelay = ndelay;
5792 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5795 block_unblock_all_queues(false);
5801 static DRIVER_ATTR_RW(ndelay);
/* sysfs 'opts' attribute: bit-mask of debug options (see SDEBUG_OPT_*).
 * Store accepts either hex (leading "0x") or decimal input and recomputes
 * the derived sdebug_verbose / sdebug_any_injecting_opt flags. */
5803 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5805 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5808 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5814 if (sscanf(buf, "%10s", work) == 1) {
5815 if (strncasecmp(work, "0x", 2) == 0) {
5816 if (kstrtoint(work + 2, 16, &opts) == 0)
5819 if (kstrtoint(work, 10, &opts) == 0)
5826 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5827 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5831 static DRIVER_ATTR_RW(opts);
/* sysfs 'ptype' attribute: simulated SCSI peripheral device type.
 * Transitions to/from TYPE_ZBC are only allowed at module load, not here. */
5833 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5835 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5837 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5842 /* Cannot change from or to TYPE_ZBC with sysfs */
5843 if (sdebug_ptype == TYPE_ZBC)
5846 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5854 static DRIVER_ATTR_RW(ptype);
/* sysfs 'dsense' attribute: use descriptor sense format when non-zero. */
5856 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5858 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5860 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5865 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5871 static DRIVER_ATTR_RW(dsense);
/* sysfs 'fake_rw' attribute. fake_rw=1 means reads/writes are not backed by
 * a ram store. Transitions manage the backing store(s): 1->0 creates (or
 * revives) a store shared by all hosts; 0->1 erases all stores but the
 * first. No-op when the value does not actually transition. */
5873 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
5875 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
5877 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
5882 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5883 bool want_store = (n == 0);
5884 struct sdebug_host_info *sdhp;
/* Normalize current value to 0/1 before comparing. */
5887 sdebug_fake_rw = (sdebug_fake_rw > 0);
5888 if (sdebug_fake_rw == n)
5889 return count; /* not transitioning so do nothing */
5891 if (want_store) { /* 1 --> 0 transition, set up store */
5892 if (sdeb_first_idx < 0) {
5893 idx = sdebug_add_store();
5897 idx = sdeb_first_idx;
5898 xa_clear_mark(per_store_ap, idx,
5899 SDEB_XA_NOT_IN_USE);
5901 /* make all hosts use same store */
5902 list_for_each_entry(sdhp, &sdebug_host_list,
5904 if (sdhp->si_idx != idx) {
5905 xa_set_mark(per_store_ap, sdhp->si_idx,
5906 SDEB_XA_NOT_IN_USE);
5910 sdeb_most_recent_idx = idx;
5911 } else { /* 0 --> 1 transition is trigger for shrink */
5912 sdebug_erase_all_stores(true /* apart from first */);
5919 static DRIVER_ATTR_RW(fake_rw);
/* sysfs 'no_lun_0' attribute: when set, LUN 0 is not presented. */
5921 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
5923 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
5925 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
5930 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5931 sdebug_no_lun_0 = n;
5936 static DRIVER_ATTR_RW(no_lun_0);
/* sysfs 'num_tgts' attribute: targets per simulated host; re-scans the
 * target/LUN topology via sdebug_max_tgts_luns() after a change. */
5938 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
5940 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
5942 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
5947 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5948 sdebug_num_tgts = n;
5949 sdebug_max_tgts_luns();
5954 static DRIVER_ATTR_RW(num_tgts);
/* sysfs 'dev_size_mb' attribute (read-only): ram store size in MiB. */
5956 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
5958 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
5960 static DRIVER_ATTR_RO(dev_size_mb);
/* sysfs 'per_host_store' attribute: when true, the next positive add_host
 * gets its own backing store (boolean parsed with kstrtobool). */
5962 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
5964 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
5967 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
5972 if (kstrtobool(buf, &v))
5975 sdebug_per_host_store = v;
5978 static DRIVER_ATTR_RW(per_host_store);
/* sysfs 'num_parts' attribute (read-only): partitions built in the store. */
5980 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
5982 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
5984 static DRIVER_ATTR_RO(num_parts);
/* sysfs 'every_nth' attribute: inject behavior every nth command; forcing
 * statistics on because the nth counter relies on them. */
5986 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
5988 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
5990 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
5995 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
5996 sdebug_every_nth = nth;
5997 if (nth && !sdebug_statistics) {
5998 pr_info("every_nth needs statistics=1, set it\n");
5999 sdebug_statistics = true;
6006 static DRIVER_ATTR_RW(every_nth);
/* sysfs 'max_luns' attribute (capped at 256). On an actual change with
 * scsi_level >= SPC-3, raise a LUNS_CHANGED unit attention on every device
 * of every host (list walk under sdebug_host_list_lock). */
6008 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6010 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6012 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6018 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6020 pr_warn("max_luns can be no more than 256\n");
6023 changed = (sdebug_max_luns != n);
6024 sdebug_max_luns = n;
6025 sdebug_max_tgts_luns();
6026 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6027 struct sdebug_host_info *sdhp;
6028 struct sdebug_dev_info *dp;
6030 spin_lock(&sdebug_host_list_lock);
6031 list_for_each_entry(sdhp, &sdebug_host_list,
6033 list_for_each_entry(dp, &sdhp->dev_info_list,
6035 set_bit(SDEBUG_UA_LUNS_CHANGED,
6039 spin_unlock(&sdebug_host_list_lock);
6045 static DRIVER_ATTR_RW(max_luns);
/* sysfs 'max_queue' attribute: per-queue command depth, 1..SDEBUG_CANQUEUE. */
6047 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6049 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6051 /* N.B. max_queue can be changed while there are queued commands. In flight
6052 * commands beyond the new max_queue will be completed. */
/* Scan each queue's in_use bitmap to find the highest busy slot; if the new
 * limit would retire busy slots, remember the cutoff in retired_max_queue
 * so those commands can still complete. */
6053 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6057 struct sdebug_queue *sqp;
6059 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6060 (n <= SDEBUG_CANQUEUE)) {
6061 block_unblock_all_queues(true);
6063 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6065 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6069 sdebug_max_queue = n;
6070 if (k == SDEBUG_CANQUEUE)
6071 atomic_set(&retired_max_queue, 0);
6073 atomic_set(&retired_max_queue, k + 1);
6075 atomic_set(&retired_max_queue, 0);
6076 block_unblock_all_queues(false);
6081 static DRIVER_ATTR_RW(max_queue);
/* Read-only sysfs attributes: 'no_uld' (block upper-level driver attach)
 * and 'scsi_level' (simulated SCSI standard level). */
6083 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6085 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6087 static DRIVER_ATTR_RO(no_uld);
6089 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6091 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6093 static DRIVER_ATTR_RO(scsi_level);
/* sysfs 'virtual_gb' attribute: virtual capacity in GiB (0 -> use
 * dev_size_mb). Rejected for ZBC devices. On change, recompute capacity
 * and raise a CAPACITY_CHANGED unit attention on every device. */
6095 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6097 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6099 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6105 /* Ignore capacity change for ZBC drives for now */
6106 if (sdeb_zbc_in_use)
6109 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6110 changed = (sdebug_virtual_gb != n);
6111 sdebug_virtual_gb = n;
6112 sdebug_capacity = get_sdebug_capacity();
6114 struct sdebug_host_info *sdhp;
6115 struct sdebug_dev_info *dp;
6117 spin_lock(&sdebug_host_list_lock);
6118 list_for_each_entry(sdhp, &sdebug_host_list,
6120 list_for_each_entry(dp, &sdhp->dev_info_list,
6122 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6126 spin_unlock(&sdebug_host_list_lock);
6132 static DRIVER_ATTR_RW(virtual_gb);
/* sysfs 'add_host' attribute. Show reports the number of active hosts.
 * Store takes a signed delta: positive adds that many hosts (re-using a
 * store marked NOT_IN_USE when one exists, otherwise allocating per the
 * want_phs policy), negative removes that many. */
6134 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6136 /* absolute number of hosts currently active is what is shown */
6137 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6140 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6145 struct sdeb_store_info *sip;
6146 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6149 if (sscanf(buf, "%d", &delta_hosts) != 1)
6151 if (delta_hosts > 0) {
/* Look for an idle (not-in-use) store to attach the new host to. */
6155 xa_for_each_marked(per_store_ap, idx, sip,
6156 SDEB_XA_NOT_IN_USE) {
6157 sdeb_most_recent_idx = (int)idx;
6161 if (found) /* re-use case */
6162 sdebug_add_host_helper((int)idx);
6164 sdebug_do_add_host(true);
6166 sdebug_do_add_host(false);
6168 } while (--delta_hosts);
6169 } else if (delta_hosts < 0) {
6171 sdebug_do_remove_host(false);
6172 } while (++delta_hosts);
6176 static DRIVER_ATTR_RW(add_host);
/* sysfs 'vpd_use_hostno' attribute: include host number in VPD device ids. */
6178 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6180 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6182 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6187 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6188 sdebug_vpd_use_hostno = n;
6193 static DRIVER_ATTR_RW(vpd_use_hostno);
/* sysfs 'statistics' attribute: toggle command/queue statistics; queue
 * stats are cleared when disabling. */
6195 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6197 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6199 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6204 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6206 sdebug_statistics = true;
6208 clear_queue_stats();
6209 sdebug_statistics = false;
6215 static DRIVER_ATTR_RW(statistics);
/* Read-only sysfs attributes reporting fixed configuration: sector_size,
 * submit_queues, and the DIF/DIX protection settings (dix, dif, guard, ato). */
6217 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6219 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6221 static DRIVER_ATTR_RO(sector_size);
6223 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6225 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6227 static DRIVER_ATTR_RO(submit_queues);
6229 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6231 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6233 static DRIVER_ATTR_RO(dix);
6235 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6237 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6239 static DRIVER_ATTR_RO(dif);
6241 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6243 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6245 static DRIVER_ATTR_RO(guard);
6247 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6249 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6251 static DRIVER_ATTR_RO(ato);
/* sysfs 'map' attribute (read-only): thin-provisioning map of store 0 as a
 * printable bit-list ("%*pbl"). Without LBP enabled, reports the whole LBA
 * range as mapped. NOTE(review): only store index 0 is examined here. */
6253 static ssize_t map_show(struct device_driver *ddp, char *buf)
6257 if (!scsi_debug_lbp())
6258 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6259 sdebug_store_sectors);
6261 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6262 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6265 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6266 (int)map_size, sip->map_storep);
6268 buf[count++] = '\n';
6273 static DRIVER_ATTR_RO(map);
/* sysfs 'random' attribute: randomize command duration (kstrtobool input). */
6275 static ssize_t random_show(struct device_driver *ddp, char *buf)
6277 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6280 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6285 if (kstrtobool(buf, &v))
6291 static DRIVER_ATTR_RW(random);
/* sysfs 'removable' attribute: claim removable media. */
6293 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6295 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6297 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6302 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6303 sdebug_removable = (n > 0);
6308 static DRIVER_ATTR_RW(removable);
/* sysfs 'host_lock' attribute: accepted but has no effect (legacy). */
6310 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6312 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6314 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6315 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6320 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6321 sdebug_host_lock = (n > 0);
6326 static DRIVER_ATTR_RW(host_lock);
/* sysfs 'strict' attribute: stricter CDB reserved-field checking. */
6328 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6330 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6332 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6337 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6338 sdebug_strict = (n > 0);
6343 static DRIVER_ATTR_RW(strict);
/* sysfs 'uuid_ctl' attribute (read-only): whether LU names use uuids. */
6345 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6347 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6349 static DRIVER_ATTR_RO(uuid_ctl);
/* sysfs 'cdb_len' attribute: preferred CDB length; a change is propagated
 * to all devices via all_config_cdb_len(). */
6351 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6353 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6355 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6360 ret = kstrtoint(buf, 0, &n);
6364 all_config_cdb_len();
6367 static DRIVER_ATTR_RW(cdb_len);
/* Three spellings accepted for the zbc= module parameter, indexed by the
 * BLK_ZONED_* enum: canonical names, short names, and numeric strings. */
6369 static const char * const zbc_model_strs_a[] = {
6370 [BLK_ZONED_NONE] = "none",
6371 [BLK_ZONED_HA] = "host-aware",
6372 [BLK_ZONED_HM] = "host-managed",
6375 static const char * const zbc_model_strs_b[] = {
6376 [BLK_ZONED_NONE] = "no",
6377 [BLK_ZONED_HA] = "aware",
6378 [BLK_ZONED_HM] = "managed",
6381 static const char * const zbc_model_strs_c[] = {
6382 [BLK_ZONED_NONE] = "0",
6383 [BLK_ZONED_HA] = "1",
6384 [BLK_ZONED_HM] = "2",
/* Map a user-supplied zbc model string to a BLK_ZONED_* value, trying each
 * table in turn. */
6387 static int sdeb_zbc_model_str(const char *cp)
6389 int res = sysfs_match_string(zbc_model_strs_a, cp);
6392 res = sysfs_match_string(zbc_model_strs_b, cp);
6394 res = sysfs_match_string(zbc_model_strs_c, cp);
6395 if (sdeb_zbc_model < 0)
/* sysfs 'zbc' attribute (read-only): current model as its canonical name. */
6402 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6404 return scnprintf(buf, PAGE_SIZE, "%s\n",
6405 zbc_model_strs_a[sdeb_zbc_model]);
6407 static DRIVER_ATTR_RO(zbc);
6409 /* Note: The following array creates attribute files in the
6410 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6411 files (over those found in the /sys/module/scsi_debug/parameters
6412 directory) is that auxiliary actions can be triggered when an attribute
6413 is changed. For example see: add_host_store() above.
6416 static struct attribute *sdebug_drv_attrs[] = {
6417 &driver_attr_delay.attr,
6418 &driver_attr_opts.attr,
6419 &driver_attr_ptype.attr,
6420 &driver_attr_dsense.attr,
6421 &driver_attr_fake_rw.attr,
6422 &driver_attr_no_lun_0.attr,
6423 &driver_attr_num_tgts.attr,
6424 &driver_attr_dev_size_mb.attr,
6425 &driver_attr_num_parts.attr,
6426 &driver_attr_every_nth.attr,
6427 &driver_attr_max_luns.attr,
6428 &driver_attr_max_queue.attr,
6429 &driver_attr_no_uld.attr,
6430 &driver_attr_scsi_level.attr,
6431 &driver_attr_virtual_gb.attr,
6432 &driver_attr_add_host.attr,
6433 &driver_attr_per_host_store.attr,
6434 &driver_attr_vpd_use_hostno.attr,
6435 &driver_attr_sector_size.attr,
6436 &driver_attr_statistics.attr,
6437 &driver_attr_submit_queues.attr,
6438 &driver_attr_dix.attr,
6439 &driver_attr_dif.attr,
6440 &driver_attr_guard.attr,
6441 &driver_attr_ato.attr,
6442 &driver_attr_map.attr,
6443 &driver_attr_random.attr,
6444 &driver_attr_removable.attr,
6445 &driver_attr_host_lock.attr,
6446 &driver_attr_ndelay.attr,
6447 &driver_attr_strict.attr,
6448 &driver_attr_uuid_ctl.attr,
6449 &driver_attr_cdb_len.attr,
6450 &driver_attr_zbc.attr,
6453 ATTRIBUTE_GROUPS(sdebug_drv);
/* Root of the pseudo device tree all simulated adapters hang off. */
6455 static struct device *pseudo_primary;
/* Module init: validate every module parameter, size and allocate the
 * submit-queue array and (unless fake_rw) the first backing store, register
 * the pseudo bus/driver, then build the requested number of hosts.
 * Error paths unwind registrations in reverse order.
 * NOTE(review): many lines (braces, returns, labels) are elided in this
 * view; the unwind labels between 6660 and 6671 are not visible. */
6457 static int __init scsi_debug_init(void)
6459 bool want_store = (sdebug_fake_rw == 0);
6461 int k, ret, hosts_to_add;
6464 ramdisk_lck_a[0] = &atomic_rw;
6465 ramdisk_lck_a[1] = &atomic_rw2;
6466 atomic_set(&retired_max_queue, 0);
/* ndelay must stay under one second; a positive value overrides jdelay. */
6468 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6469 pr_warn("ndelay must be less than 1 second, ignored\n");
6471 } else if (sdebug_ndelay > 0)
6472 sdebug_jdelay = JDELAY_OVERRIDDEN;
6474 switch (sdebug_sector_size) {
6481 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6485 switch (sdebug_dif) {
6486 case T10_PI_TYPE0_PROTECTION:
6488 case T10_PI_TYPE1_PROTECTION:
6489 case T10_PI_TYPE2_PROTECTION:
6490 case T10_PI_TYPE3_PROTECTION:
6491 have_dif_prot = true;
6495 pr_err("dif must be 0, 1, 2 or 3\n");
6499 if (sdebug_num_tgts < 0) {
6500 pr_err("num_tgts must be >= 0\n");
6504 if (sdebug_guard > 1) {
6505 pr_err("guard must be 0 or 1\n");
6509 if (sdebug_ato > 1) {
6510 pr_err("ato must be 0 or 1\n");
6514 if (sdebug_physblk_exp > 15) {
6515 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6518 if (sdebug_max_luns > 256) {
6519 pr_warn("max_luns can be no more than 256, use default\n");
6520 sdebug_max_luns = DEF_MAX_LUNS;
6523 if (sdebug_lowest_aligned > 0x3fff) {
6524 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6528 if (submit_queues < 1) {
6529 pr_err("submit_queues must be 1 or more\n");
/* One sdebug_queue (with spinlock) per submit queue. */
6532 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6534 if (sdebug_q_arr == NULL)
6536 for (k = 0; k < submit_queues; ++k)
6537 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6540 * check for host managed zoned block device specified with
6541 * ptype=0x14 or zbc=XXX.
6543 if (sdebug_ptype == TYPE_ZBC) {
6544 sdeb_zbc_model = BLK_ZONED_HM;
6545 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6546 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6552 switch (sdeb_zbc_model) {
6553 case BLK_ZONED_NONE:
6554 sdebug_ptype = TYPE_DISK;
6557 sdebug_ptype = TYPE_ZBC;
6561 pr_err("Invalid ZBC model\n");
6565 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6566 sdeb_zbc_in_use = true;
6567 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6568 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6571 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6572 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6573 if (sdebug_dev_size_mb < 1)
6574 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6575 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6576 sdebug_store_sectors = sz / sdebug_sector_size;
6577 sdebug_capacity = get_sdebug_capacity();
6579 /* play around with geometry, don't waste too much on track 0 */
6581 sdebug_sectors_per = 32;
6582 if (sdebug_dev_size_mb >= 256)
6584 else if (sdebug_dev_size_mb >= 16)
6586 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6587 (sdebug_sectors_per * sdebug_heads);
6588 if (sdebug_cylinders_per >= 1024) {
6589 /* other LLDs do this; implies >= 1GB ram disk ... */
6591 sdebug_sectors_per = 63;
6592 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6593 (sdebug_sectors_per * sdebug_heads);
/* Clamp logical-block-provisioning parameters to sane ranges. */
6595 if (scsi_debug_lbp()) {
6596 sdebug_unmap_max_blocks =
6597 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6599 sdebug_unmap_max_desc =
6600 clamp(sdebug_unmap_max_desc, 0U, 256U);
6602 sdebug_unmap_granularity =
6603 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6605 if (sdebug_unmap_alignment &&
6606 sdebug_unmap_granularity <=
6607 sdebug_unmap_alignment) {
6608 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6613 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6615 idx = sdebug_add_store();
/* Register the pseudo root device, bus and driver that hosts attach to. */
6622 pseudo_primary = root_device_register("pseudo_0");
6623 if (IS_ERR(pseudo_primary)) {
6624 pr_warn("root_device_register() error\n");
6625 ret = PTR_ERR(pseudo_primary);
6628 ret = bus_register(&pseudo_lld_bus);
6630 pr_warn("bus_register error: %d\n", ret);
6633 ret = driver_register(&sdebug_driverfs_driver);
6635 pr_warn("driver_register error: %d\n", ret);
/* add_host is consumed here; it is reset so later sysfs writes start at 0. */
6639 hosts_to_add = sdebug_add_host;
6640 sdebug_add_host = 0;
6642 for (k = 0; k < hosts_to_add; k++) {
6643 if (want_store && k == 0) {
/* First host uses the store allocated above. */
6644 ret = sdebug_add_host_helper(idx);
6646 pr_err("add_host_helper k=%d, error=%d\n",
6651 ret = sdebug_do_add_host(want_store &&
6652 sdebug_per_host_store);
6654 pr_err("add_host k=%d error=%d\n", k, -ret);
6660 pr_info("built %d host(s)\n", sdebug_num_hosts);
/* Error unwind: undo registrations/allocations in reverse order. */
6665 bus_unregister(&pseudo_lld_bus);
6667 root_device_unregister(pseudo_primary);
6669 sdebug_erase_store(idx, NULL);
6671 kfree(sdebug_q_arr);
/* Module exit: remove every host, unregister driver/bus/root device, then
 * free all backing stores and the per_store xarray. */
6675 static void __exit scsi_debug_exit(void)
6677 int k = sdebug_num_hosts;
6681 sdebug_do_remove_host(true);
6683 driver_unregister(&sdebug_driverfs_driver);
6684 bus_unregister(&pseudo_lld_bus);
6685 root_device_unregister(pseudo_primary);
6687 sdebug_erase_all_stores(false);
6688 xa_destroy(per_store_ap);
/* device_initcall: init runs late enough for the driver core to be ready. */
6691 device_initcall(scsi_debug_init);
6692 module_exit(scsi_debug_exit);
/* Release callback for a simulated adapter's struct device. */
6694 static void sdebug_release_adapter(struct device *dev)
6696 struct sdebug_host_info *sdbg_host;
6698 sdbg_host = to_sdebug_host(dev);
6702 /* idx must be valid, if sip is NULL then it will be obtained using idx */
/* Free one backing store (data, LBP map, DIF buffer) and drop its xarray
 * slot. */
6703 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6708 if (xa_empty(per_store_ap))
6710 sip = xa_load(per_store_ap, idx)
6714 vfree(sip->map_storep);
6715 vfree(sip->dif_storep);
6717 xa_erase(per_store_ap, idx);
6721 /* Assume apart_from_first==false only in shutdown case. */
/* Erase every store, optionally skipping the first one encountered; when
 * the first survives, it becomes the most-recent index again. */
6722 static void sdebug_erase_all_stores(bool apart_from_first)
6725 struct sdeb_store_info *sip = NULL;
6727 xa_for_each(per_store_ap, idx, sip) {
6728 if (apart_from_first)
6729 apart_from_first = false;
6731 sdebug_erase_store(idx, sip);
6733 if (apart_from_first)
6734 sdeb_most_recent_idx = sdeb_first_idx;
6738 * Returns store xarray new element index (idx) if >=0 else negated errno.
6739 * Limit the number of stores to 65536.
/* Allocate a new backing store: the sdeb_store_info record, an xarray slot
 * (allocated under the xarray lock with interrupts off), the ram data area,
 * and optionally DIF protection and LBP map buffers. On any failure after
 * the xarray insert, the partially built store is erased. */
6741 static int sdebug_add_store(void)
6745 unsigned long iflags;
6746 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6747 struct sdeb_store_info *sip = NULL;
6748 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6750 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6754 xa_lock_irqsave(per_store_ap, iflags);
6755 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6756 if (unlikely(res < 0)) {
6757 xa_unlock_irqrestore(per_store_ap, iflags);
6759 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6762 sdeb_most_recent_idx = n_idx;
6763 if (sdeb_first_idx < 0)
6764 sdeb_first_idx = n_idx;
6765 xa_unlock_irqrestore(per_store_ap, iflags);
/* Main user-data area; optionally pre-built with a partition table. */
6768 sip->storep = vzalloc(sz);
6770 pr_err("user data oom\n");
6773 if (sdebug_num_parts > 0)
6774 sdebug_build_parts(sip->storep, sz);
6776 /* DIF/DIX: what T10 calls Protection Information (PI) */
6780 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
6781 sip->dif_storep = vmalloc(dif_size);
6783 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
6786 if (!sip->dif_storep) {
6787 pr_err("DIX oom\n");
/* 0xff fill means "no PI written yet" for every sector. */
6790 memset(sip->dif_storep, 0xff, dif_size);
6792 /* Logical Block Provisioning */
6793 if (scsi_debug_lbp()) {
6794 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
6795 sip->map_storep = vmalloc(array_size(sizeof(long),
6796 BITS_TO_LONGS(map_size)));
6798 pr_info("%lu provisioning blocks\n", map_size);
6800 if (!sip->map_storep) {
6801 pr_err("LBP map oom\n");
6805 bitmap_zero(sip->map_storep, map_size);
6807 /* Map first 1KB for partition table */
6808 if (sdebug_num_parts)
6809 map_region(sip, 0, 2);
6812 rwlock_init(&sip->macc_lck);
/* Error path: tear down the partly initialized store. */
6815 sdebug_erase_store((int)n_idx, sip);
6816 pr_warn("%s: failed, errno=%d\n", __func__, -res);
/* Create one simulated host bound to store index per_host_idx (or the
 * first store when negative), populate its num_tgts * max_luns devices,
 * link it into sdebug_host_list and register its struct device. The error
 * path frees any devices already created. Returns 0 or negated errno. */
6820 static int sdebug_add_host_helper(int per_host_idx)
6822 int k, devs_per_host, idx;
6823 int error = -ENOMEM;
6824 struct sdebug_host_info *sdbg_host;
6825 struct sdebug_dev_info *sdbg_devinfo, *tmp;
6827 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
6830 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
/* Revive the store if it was parked as not-in-use. */
6831 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
6832 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6833 sdbg_host->si_idx = idx;
6835 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
6837 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
6838 for (k = 0; k < devs_per_host; k++) {
6839 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
6844 spin_lock(&sdebug_host_list_lock);
6845 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
6846 spin_unlock(&sdebug_host_list_lock);
6848 sdbg_host->dev.bus = &pseudo_lld_bus;
6849 sdbg_host->dev.parent = pseudo_primary;
6850 sdbg_host->dev.release = &sdebug_release_adapter;
6851 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
6853 error = device_register(&sdbg_host->dev);
/* Error path: free devices created so far, then report. */
6861 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
6863 list_del(&sdbg_devinfo->dev_list);
6864 kfree(sdbg_devinfo->zstate);
6865 kfree(sdbg_devinfo);
6868 pr_warn("%s: failed, errno=%d\n", __func__, -error);
/* Add one host, creating a fresh store first when mk_new_store is set,
 * otherwise re-using the most recent store index. */
6872 static int sdebug_do_add_host(bool mk_new_store)
6874 int ph_idx = sdeb_most_recent_idx;
6877 ph_idx = sdebug_add_store();
6881 return sdebug_add_host_helper(ph_idx);
/* Remove the most recently added host (tail of sdebug_host_list). Unless
 * shutting down (the_end), mark the host's store not-in-use when no other
 * host still references it, adjusting sdeb_most_recent_idx. Finally
 * unregister the host's struct device (its release frees the host). */
6884 static void sdebug_do_remove_host(bool the_end)
6887 struct sdebug_host_info *sdbg_host = NULL;
6888 struct sdebug_host_info *sdbg_host2;
6890 spin_lock(&sdebug_host_list_lock);
6891 if (!list_empty(&sdebug_host_list)) {
6892 sdbg_host = list_entry(sdebug_host_list.prev,
6893 struct sdebug_host_info, host_list);
6894 idx = sdbg_host->si_idx;
6896 if (!the_end && idx >= 0) {
/* Is any other host sharing this store? */
6899 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
6900 if (sdbg_host2 == sdbg_host)
6902 if (idx == sdbg_host2->si_idx) {
6908 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6909 if (idx == sdeb_most_recent_idx)
6910 --sdeb_most_recent_idx;
6914 list_del(&sdbg_host->host_list);
6915 spin_unlock(&sdebug_host_list_lock);
6920 device_unregister(&sdbg_host->dev);
/* scsi_host_template .change_queue_depth handler. Queues are blocked
 * around the change; the requested depth may exceed SDEBUG_CANQUEUE by up
 * to 10 for testing. Returns the resulting queue depth (or an error when
 * the device has no sdebug private data). */
6924 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
6927 struct sdebug_dev_info *devip;
6929 block_unblock_all_queues(true);
6930 devip = (struct sdebug_dev_info *)sdev->hostdata;
6931 if (NULL == devip) {
6932 block_unblock_all_queues(false);
6935 num_in_q = atomic_read(&devip->num_in_q);
6939 /* allow to exceed max host qc_arr elements for testing */
6940 if (qdepth > SDEBUG_CANQUEUE + 10)
6941 qdepth = SDEBUG_CANQUEUE + 10;
6942 scsi_change_queue_depth(sdev, qdepth);
6944 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
6945 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
6946 __func__, qdepth, num_in_q);
6948 block_unblock_all_queues(false);
6949 return sdev->queue_depth;
/* Decide whether to fake a timeout for this command, based on the
 * every_nth counter and the TIMEOUT / MAC_TIMEOUT option bits (the latter
 * only times out medium-access commands). A negative every_nth below -1 is
 * clamped to -1 (one-shot mode). */
6952 static bool fake_timeout(struct scsi_cmnd *scp)
6954 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
6955 if (sdebug_every_nth < -1)
6956 sdebug_every_nth = -1;
6957 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
6958 return true; /* ignore command causing timeout */
6959 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
6960 scsi_medium_access_command(scp))
6961 return true; /* time out reads and writes */
/* Return SCSI_MLQUEUE_HOST_BUSY-worthy condition every nth command when
 * the HOST_BUSY option bit is set. */
6966 static bool fake_host_busy(struct scsi_cmnd *scp)
6968 return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
6969 (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
/*
 * ->queuecommand entry point for the scsi_debug host.
 * Pipeline (visible portions; several original lines are elided here):
 *   1. optional CDB hex-dump logging when verbose and CDB noise allowed;
 *   2. fault injection: fake host-busy, invalid LUN check;
 *   3. opcode lookup in opcode_info_arr, including service-action
 *      disambiguation when one opcode maps to several entries;
 *   4. strict-mode CDB reserved-bit checking against len_mask;
 *   5. unit-attention delivery, not-ready (stopped) state, fake r/w;
 *   6. fake timeout injection via sdebug_every_nth;
 *   7. dispatch through the per-opcode resp_* function pointer with a
 *      delay policy chosen from the opcode flags (asap, long delay for
 *      SSU/Sync-Cache, or the default sdebug_jdelay).
 * Returns 0 or SCSI_MLQUEUE_HOST_BUSY; errors are reported through
 * schedule_resp() with sense data / check_condition_result.
 */
6972 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
6973 struct scsi_cmnd *scp)
6976 struct scsi_device *sdp = scp->device;
6977 const struct opcode_info_t *oip;
6978 const struct opcode_info_t *r_oip;
6979 struct sdebug_dev_info *devip;
6981 u8 *cmd = scp->cmnd;
6982 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
6983 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
6991 scsi_set_resid(scp, 0);
6992 if (sdebug_statistics)
6993 atomic_inc(&sdebug_cmnd_count);
/* Verbose mode: hex-dump the CDB unless CDB noise is suppressed. */
6994 if (unlikely(sdebug_verbose &&
6995 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7000 sb = (int)sizeof(b);
7002 strcpy(b, "too long, over 32 bytes");
7004 for (k = 0, n = 0; k < len && n < sb; ++k)
7005 n += scnprintf(b + n, sb - n, "%02x ",
7008 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7009 blk_mq_unique_tag(scp->request), b);
/* Injected host-busy: push the command back to the midlayer. */
7011 if (fake_host_busy(scp))
7012 return SCSI_MLQUEUE_HOST_BUSY;
7013 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
/* LUN beyond the configured range (REPORT LUNS wlun excepted). */
7014 if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
7017 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7018 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7019 devip = (struct sdebug_dev_info *)sdp->hostdata;
/* Lazily build per-device state on first command to this device. */
7020 if (unlikely(!devip)) {
7021 devip = find_build_dev_info(sdp);
7025 na = oip->num_attached;
7027 if (na) { /* multiple commands with this opcode */
7029 if (FF_SA & r_oip->flags) {
7030 if (F_SA_LOW & oip->flags)
/* Service action in bytes 8..9 for SA-high opcodes. */
7033 sa = get_unaligned_be16(cmd + 8);
7034 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7035 if (opcode == oip->opcode && sa == oip->sa)
7038 } else { /* since no service action only check opcode */
7039 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7040 if (opcode == oip->opcode)
/* No table entry matched: report invalid field or opcode. */
7045 if (F_SA_LOW & r_oip->flags)
7046 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7047 else if (F_SA_HIGH & r_oip->flags)
7048 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7050 mk_sense_invalid_opcode(scp);
7053 } /* else (when na==0) we assume the oip is a match */
7055 if (unlikely(F_INV_OP & flags)) {
7056 mk_sense_invalid_opcode(scp);
/* REPORT LUNS wlun only accepts opcodes flagged F_RL_WLUN_OK. */
7059 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7061 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7062 my_name, opcode, " supported for wlun");
7063 mk_sense_invalid_opcode(scp);
/* Strict mode: flag any CDB bit set outside the opcode's mask. */
7066 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7070 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7071 rem = ~oip->len_mask[k] & cmd[k];
/* Find the highest offending bit to name it in the sense data. */
7073 for (j = 7; j >= 0; --j, rem <<= 1) {
7077 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
/* Deliver any pending unit attention unless the opcode skips UAs. */
7082 if (unlikely(!(F_SKIP_UA & flags) &&
7083 find_first_bit(devip->uas_bm,
7084 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7085 errsts = make_ua(scp, devip);
/* Stopped unit: medium-access commands get NOT READY sense. */
7089 if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
7090 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7092 sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
7093 "%s\n", my_name, "initializing command "
7095 errsts = check_condition_result;
/* fake_rw: skip the actual data transfer for read/write commands. */
7098 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7100 if (unlikely(sdebug_every_nth)) {
7101 if (fake_timeout(scp))
7102 return 0; /* ignore command: make trouble */
7104 if (likely(oip->pfp))
7105 pfp = oip->pfp; /* calls a resp_* function */
7107 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7110 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7111 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7112 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7113 sdebug_ndelay > 10000)) {
7115 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7116 * for Start Stop Unit (SSU) want at least 1 second delay and
7117 * if sdebug_jdelay>1 want a long delay of that many seconds.
7118 * For Synchronize Cache want 1/20 of SSU's delay.
7120 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7121 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
/* Convert seconds (USER_HZ based) to jiffies, scaled by denom. */
7123 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7124 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7126 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
/* check_condition fall-through: sense already set above. */
7129 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
/* No device info could be built: answer DID_NO_CONNECT. */
7131 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
/*
 * Host template wiring the scsi_debug handlers into the SCSI midlayer:
 * procfs show/write, slave lifecycle, ioctl, queuecommand, queue-depth
 * change and the full set of error-handler callbacks. can_queue is
 * overwritten with sdebug_max_queue in sdebug_driver_probe() before
 * scsi_host_alloc(); dma_boundary may also be set there when
 * clustering is disabled. (Closing brace of the initializer is elided
 * in this view.)
 */
7134 static struct scsi_host_template sdebug_driver_template = {
7135 .show_info = scsi_debug_show_info,
7136 .write_info = scsi_debug_write_info,
7137 .proc_name = sdebug_proc_name,
7138 .name = "SCSI DEBUG",
7139 .info = scsi_debug_info,
7140 .slave_alloc = scsi_debug_slave_alloc,
7141 .slave_configure = scsi_debug_slave_configure,
7142 .slave_destroy = scsi_debug_slave_destroy,
7143 .ioctl = scsi_debug_ioctl,
7144 .queuecommand = scsi_debug_queuecommand,
7145 .change_queue_depth = sdebug_change_qdepth,
7146 .eh_abort_handler = scsi_debug_abort,
7147 .eh_device_reset_handler = scsi_debug_device_reset,
7148 .eh_target_reset_handler = scsi_debug_target_reset,
7149 .eh_bus_reset_handler = scsi_debug_bus_reset,
7150 .eh_host_reset_handler = scsi_debug_host_reset,
7151 .can_queue = SDEBUG_CANQUEUE,
7153 .sg_tablesize = SG_MAX_SEGMENTS,
7154 .cmd_per_lun = DEF_CMD_PER_LUN,
7156 .max_segment_size = -1U,
7157 .module = THIS_MODULE,
7158 .track_queue_depth = 1,
/*
 * Probe callback for devices on the pseudo LLD bus: allocates a
 * Scsi_Host from sdebug_driver_template, configures multiqueue
 * (nr_hw_queues), target/LUN limits, T10 DIF/DIX protection and guard
 * type from module parameters, then registers and scans the host.
 * Returns 0 on success; error paths (elided labels in this view) put
 * the host and propagate the error. NOTE(review): several original
 * lines — locals, gotos, break statements in the switch — are elided;
 * comments describe only what is visible.
 */
7161 static int sdebug_driver_probe(struct device *dev)
7164 struct sdebug_host_info *sdbg_host;
7165 struct Scsi_Host *hpnt;
7168 sdbg_host = to_sdebug_host(dev);
/* Template tweaks must happen before scsi_host_alloc() copies it. */
7170 sdebug_driver_template.can_queue = sdebug_max_queue;
7171 if (!sdebug_clustering)
7172 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7174 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7176 pr_err("scsi_host_alloc failed\n");
/* Cap submit queues at the number of possible CPUs. */
7180 if (submit_queues > nr_cpu_ids) {
7181 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7182 my_name, submit_queues, nr_cpu_ids);
7183 submit_queues = nr_cpu_ids;
7185 /* Decide whether to tell scsi subsystem that we want mq */
7186 /* Following should give the same answer for each host */
7187 hpnt->nr_hw_queues = submit_queues;
7189 sdbg_host->shost = hpnt;
/* hostdata holds a back-pointer to our per-host bookkeeping. */
7190 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
/* Leave room for the initiator's own ID within max_id. */
7191 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7192 hpnt->max_id = sdebug_num_tgts + 1;
7194 hpnt->max_id = sdebug_num_tgts;
7195 /* = sdebug_max_luns; */
7196 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
/* Map the sdebug_dif module parameter onto SHOST DIF/DIX flags. */
7200 switch (sdebug_dif) {
7202 case T10_PI_TYPE1_PROTECTION:
7203 hprot = SHOST_DIF_TYPE1_PROTECTION;
7205 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7208 case T10_PI_TYPE2_PROTECTION:
7209 hprot = SHOST_DIF_TYPE2_PROTECTION;
7211 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7214 case T10_PI_TYPE3_PROTECTION:
7215 hprot = SHOST_DIF_TYPE3_PROTECTION;
7217 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7222 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7226 scsi_host_set_prot(hpnt, hprot);
7228 if (have_dif_prot || sdebug_dix)
7229 pr_info("host protection%s%s%s%s%s%s%s\n",
7230 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7231 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7232 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7233 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7234 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7235 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7236 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard=1 selects IP checksum, otherwise T10 CRC. */
7238 if (sdebug_guard == 1)
7239 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7241 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7243 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7244 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7245 if (sdebug_every_nth) /* need stats counters for every_nth */
7246 sdebug_statistics = true;
7247 error = scsi_add_host(hpnt, &sdbg_host->dev);
7249 pr_err("scsi_add_host failed\n");
7251 scsi_host_put(hpnt);
7253 scsi_scan_host(hpnt);
/*
 * Remove callback for the pseudo LLD bus: unregisters the Scsi_Host,
 * frees every per-device info structure hanging off the host's
 * dev_info_list (including any zoned-state array), and drops the
 * final host reference. NOTE(review): the NULL-host-info check's
 * error return and the list-lock lines are elided in this view.
 */
7259 static int sdebug_driver_remove(struct device *dev)
7261 struct sdebug_host_info *sdbg_host;
7262 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7264 sdbg_host = to_sdebug_host(dev);
7267 pr_err("Unable to locate host info\n");
7271 scsi_remove_host(sdbg_host->shost);
/* Tear down per-device bookkeeping; safe-iterate since we delete. */
7273 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7275 list_del(&sdbg_devinfo->dev_list);
7276 kfree(sdbg_devinfo->zstate);
7277 kfree(sdbg_devinfo);
7280 scsi_host_put(sdbg_host->shost);
/*
 * Bus match callback for the pseudo LLD bus (body elided in this
 * view). Presumably every device on this private bus matches the
 * single driver — verify against the full source.
 */
7284 static int pseudo_lld_bus_match(struct device *dev,
7285 struct device_driver *dev_driver)
7290 static struct bus_type pseudo_lld_bus = {
7292 .match = pseudo_lld_bus_match,
7293 .probe = sdebug_driver_probe,
7294 .remove = sdebug_driver_remove,
7295 .drv_groups = sdebug_drv_groups,