scsi: scsi_debug: Allocate the MODE SENSE response from the heap
[linux-2.6-microblaze.git] / drivers / scsi / scsi_debug.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
47
48 #include <net/checksum.h>
49
50 #include <asm/unaligned.h>
51
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
60
61 #include "sd.h"
62 #include "scsi_logging.h"
63
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191"   /* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
67
68 #define MY_NAME "scsi_debug"
69
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define LOGICAL_UNIT_NOT_READY 0x4
73 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
74 #define UNRECOVERED_READ_ERR 0x11
75 #define PARAMETER_LIST_LENGTH_ERR 0x1a
76 #define INVALID_OPCODE 0x20
77 #define LBA_OUT_OF_RANGE 0x21
78 #define INVALID_FIELD_IN_CDB 0x24
79 #define INVALID_FIELD_IN_PARAM_LIST 0x26
80 #define WRITE_PROTECTED 0x27
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define TARGET_CHANGED_ASC 0x3f
84 #define LUNS_CHANGED_ASCQ 0x0e
85 #define INSUFF_RES_ASC 0x55
86 #define INSUFF_RES_ASCQ 0x3
87 #define POWER_ON_RESET_ASCQ 0x0
88 #define POWER_ON_OCCURRED_ASCQ 0x1
89 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
90 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
91 #define CAPACITY_CHANGED_ASCQ 0x9
92 #define SAVING_PARAMS_UNSUP 0x39
93 #define TRANSPORT_PROBLEM 0x4b
94 #define THRESHOLD_EXCEEDED 0x5d
95 #define LOW_POWER_COND_ON 0x5e
96 #define MISCOMPARE_VERIFY_ASC 0x1d
97 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
98 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
99 #define WRITE_ERROR_ASC 0xc
100 #define UNALIGNED_WRITE_ASCQ 0x4
101 #define WRITE_BOUNDARY_ASCQ 0x5
102 #define READ_INVDATA_ASCQ 0x6
103 #define READ_BOUNDARY_ASCQ 0x7
104 #define ATTEMPT_ACCESS_GAP 0x9
105 #define INSUFF_ZONE_ASCQ 0xe
106
107 /* Additional Sense Code Qualifier (ASCQ) */
108 #define ACK_NAK_TO 0x3
109
110 /* Default values for driver parameters */
111 #define DEF_NUM_HOST   1
112 #define DEF_NUM_TGTS   1
113 #define DEF_MAX_LUNS   1
114 /* With these defaults, this driver will make 1 host with 1 target
115  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
116  */
117 #define DEF_ATO 1
118 #define DEF_CDB_LEN 10
119 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
120 #define DEF_DEV_SIZE_PRE_INIT   0
121 #define DEF_DEV_SIZE_MB   8
122 #define DEF_ZBC_DEV_SIZE_MB   128
123 #define DEF_DIF 0
124 #define DEF_DIX 0
125 #define DEF_PER_HOST_STORE false
126 #define DEF_D_SENSE   0
127 #define DEF_EVERY_NTH   0
128 #define DEF_FAKE_RW     0
129 #define DEF_GUARD 0
130 #define DEF_HOST_LOCK 0
131 #define DEF_LBPU 0
132 #define DEF_LBPWS 0
133 #define DEF_LBPWS10 0
134 #define DEF_LBPRZ 1
135 #define DEF_LOWEST_ALIGNED 0
136 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
137 #define DEF_NO_LUN_0   0
138 #define DEF_NUM_PARTS   0
139 #define DEF_OPTS   0
140 #define DEF_OPT_BLKS 1024
141 #define DEF_PHYSBLK_EXP 0
142 #define DEF_OPT_XFERLEN_EXP 0
143 #define DEF_PTYPE   TYPE_DISK
144 #define DEF_RANDOM false
145 #define DEF_REMOVABLE false
146 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
147 #define DEF_SECTOR_SIZE 512
148 #define DEF_UNMAP_ALIGNMENT 0
149 #define DEF_UNMAP_GRANULARITY 1
150 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
151 #define DEF_UNMAP_MAX_DESC 256
152 #define DEF_VIRTUAL_GB   0
153 #define DEF_VPD_USE_HOSTNO 1
154 #define DEF_WRITESAME_LENGTH 0xFFFF
155 #define DEF_STRICT 0
156 #define DEF_STATISTICS false
157 #define DEF_SUBMIT_QUEUES 1
158 #define DEF_TUR_MS_TO_READY 0
159 #define DEF_UUID_CTL 0
160 #define JDELAY_OVERRIDDEN -9999
161
162 /* Default parameters for ZBC drives */
163 #define DEF_ZBC_ZONE_SIZE_MB    128
164 #define DEF_ZBC_MAX_OPEN_ZONES  8
165 #define DEF_ZBC_NR_CONV_ZONES   1
166
167 #define SDEBUG_LUN_0_VAL 0
168
169 /* bit mask values for sdebug_opts */
170 #define SDEBUG_OPT_NOISE                1
171 #define SDEBUG_OPT_MEDIUM_ERR           2
172 #define SDEBUG_OPT_TIMEOUT              4
173 #define SDEBUG_OPT_RECOVERED_ERR        8
174 #define SDEBUG_OPT_TRANSPORT_ERR        16
175 #define SDEBUG_OPT_DIF_ERR              32
176 #define SDEBUG_OPT_DIX_ERR              64
177 #define SDEBUG_OPT_MAC_TIMEOUT          128
178 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
179 #define SDEBUG_OPT_Q_NOISE              0x200
180 #define SDEBUG_OPT_ALL_TSF              0x400   /* ignore */
181 #define SDEBUG_OPT_RARE_TSF             0x800
182 #define SDEBUG_OPT_N_WCE                0x1000
183 #define SDEBUG_OPT_RESET_NOISE          0x2000
184 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
185 #define SDEBUG_OPT_HOST_BUSY            0x8000
186 #define SDEBUG_OPT_CMD_ABORT            0x10000
187 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
188                               SDEBUG_OPT_RESET_NOISE)
189 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
190                                   SDEBUG_OPT_TRANSPORT_ERR | \
191                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
192                                   SDEBUG_OPT_SHORT_TRANSFER | \
193                                   SDEBUG_OPT_HOST_BUSY | \
194                                   SDEBUG_OPT_CMD_ABORT)
195 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
196                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
197
198 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
199  * priority order. In the subset implemented here lower numbers have higher
200  * priority. The UA numbers should be a sequence starting from 0 with
201  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
202 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
203 #define SDEBUG_UA_POOCCUR 1     /* Power on occurred */
204 #define SDEBUG_UA_BUS_RESET 2
205 #define SDEBUG_UA_MODE_CHANGED 3
206 #define SDEBUG_UA_CAPACITY_CHANGED 4
207 #define SDEBUG_UA_LUNS_CHANGED 5
208 #define SDEBUG_UA_MICROCODE_CHANGED 6   /* simulate firmware change */
209 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
210 #define SDEBUG_NUM_UAS 8
211
212 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
213  * sector on read commands: */
214 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
215 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
216
217 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
218  * (for response) per submit queue at one time. Can be reduced by max_queue
219  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
220  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
221  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
222  * but cannot exceed SDEBUG_CANQUEUE .
223  */
224 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
225 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
226 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
227
228 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
229 #define F_D_IN                  1       /* Data-in command (e.g. READ) */
230 #define F_D_OUT                 2       /* Data-out command (e.g. WRITE) */
231 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
232 #define F_D_UNKN                8
233 #define F_RL_WLUN_OK            0x10    /* allowed with REPORT LUNS W-LUN */
234 #define F_SKIP_UA               0x20    /* bypass UAs (e.g. INQUIRY command) */
235 #define F_DELAY_OVERR           0x40    /* for commands like INQUIRY */
236 #define F_SA_LOW                0x80    /* SA is in cdb byte 1, bits 4 to 0 */
237 #define F_SA_HIGH               0x100   /* SA is in cdb bytes 8 and 9 */
238 #define F_INV_OP                0x200   /* invalid opcode (not supported) */
239 #define F_FAKE_RW               0x400   /* bypass resp_*() when fake_rw set */
240 #define F_M_ACCESS              0x800   /* media access, reacts to SSU state */
241 #define F_SSU_DELAY             0x1000  /* SSU command delay (long-ish) */
242 #define F_SYNC_DELAY            0x2000  /* SYNCHRONIZE CACHE delay */
243
244 /* Useful combinations of the above flags */
245 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
246 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
247 #define FF_SA (F_SA_HIGH | F_SA_LOW)
248 #define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
249
250 #define SDEBUG_MAX_PARTS 4
251
252 #define SDEBUG_MAX_CMD_LEN 32
253
254 #define SDEB_XA_NOT_IN_USE XA_MARK_1
255
256 static struct kmem_cache *queued_cmd_cache;
257
258 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
259 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
260
261 /* Zone types (zbcr05 table 25) */
262 enum sdebug_z_type {
263         ZBC_ZTYPE_CNV   = 0x1,          /* conventional (no write pointer) */
264         ZBC_ZTYPE_SWR   = 0x2,          /* sequential write required */
265         ZBC_ZTYPE_SWP   = 0x3,          /* sequential write preferred */
266         /* ZBC_ZTYPE_SOBR = 0x4, */     /* sequential-or-before-required: not implemented */
267         ZBC_ZTYPE_GAP   = 0x5,          /* gap zone (zoned domains) */
268 };
269
270 /* enumeration names taken from table 26, zbcr05 */
271 enum sdebug_z_cond {
272         ZBC_NOT_WRITE_POINTER   = 0x0,  /* e.g. conventional/gap zones */
273         ZC1_EMPTY               = 0x1,
274         ZC2_IMPLICIT_OPEN       = 0x2,
275         ZC3_EXPLICIT_OPEN       = 0x3,
276         ZC4_CLOSED              = 0x4,
277         ZC6_READ_ONLY           = 0xd,  /* note: ZC6 numerically precedes ZC5 */
278         ZC5_FULL                = 0xe,
279         ZC7_OFFLINE             = 0xf,
280 };
281
282 struct sdeb_zone_state {        /* ZBC: per zone state */
283         enum sdebug_z_type z_type;      /* zone type (zbcr05 table 25) */
284         enum sdebug_z_cond z_cond;      /* zone condition (zbcr05 table 26) */
285         bool z_non_seq_resource;        /* non-sequential-write resource in use */
286         unsigned int z_size;            /* zone size -- presumably in sectors, cf. zsize; confirm */
287         sector_t z_start;               /* first sector of this zone */
288         sector_t z_wp;                  /* current write pointer position */
289 };
290
291 enum sdebug_err_type {
292         ERR_TMOUT_CMD           = 0,    /* make specific scsi command timeout */
293         ERR_FAIL_QUEUE_CMD      = 1,    /* make specific scsi command's */
294                                         /* queuecmd return failed */
295         ERR_FAIL_CMD            = 2,    /* make specific scsi command's */
296                                         /* queuecmd return succeed but */
297                                         /* with errors set in scsi_cmnd */
298         ERR_ABORT_CMD_FAILED    = 3,    /* control return FAILED from */
299                                         /* scsi_debug_abort() */
300         ERR_LUN_RESET_FAILED    = 4,    /* control return FAILED from */
301                                         /* scsi_debug_device_reset() */
302 };
303
304 struct sdebug_err_inject {
305         int type;                       /* an enum sdebug_err_type value */
306         struct list_head list;          /* node in sdebug_dev_info.inject_err_list */
307         int cnt;                        /* trigger count -- presumably times left to fire; confirm */
308         unsigned char cmd;              /* cdb opcode of the "specific scsi command" to match */
309         struct rcu_head rcu;            /* for RCU-deferred freeing of this entry */
310
311         union {
312                 /*
313                  * For ERR_FAIL_QUEUE_CMD: value queuecommand should return.
314                  */
315                 int queuecmd_ret;
316
317                 /*
318                  * For ERR_FAIL_CMD: result bytes / sense data to set in the
319                  * scsi_cmnd before completing it.
320                  */
321                 struct {
322                         unsigned char host_byte;
323                         unsigned char driver_byte;
324                         unsigned char status_byte;
325                         unsigned char sense_key;
326                         unsigned char asc;
327                         unsigned char asq;      /* additional sense code qualifier (ASCQ) */
328                 };
329         };
330 };
330
331 struct sdebug_dev_info {
332         struct list_head dev_list;      /* node in sdebug_host_info.dev_info_list */
333         unsigned int channel;           /* SCSI nexus: channel */
334         unsigned int target;            /* SCSI nexus: target id */
335         u64 lun;                        /* SCSI nexus: logical unit number */
336         uuid_t lu_name;                 /* LU name; presumably reported via a VPD designator -- confirm */
337         struct sdebug_host_info *sdbg_host;     /* owning pseudo host */
338         unsigned long uas_bm[1];        /* pending Unit Attentions; bit numbers are SDEBUG_UA_* */
339         atomic_t stopped;       /* 1: by SSU, 2: device start */
340         bool used;              /* entry is in use -- TODO confirm exact lifecycle */
341
342         /* For ZBC devices */
343         bool zoned;             /* true when emulating a zoned (ZBC) device */
344         unsigned int zcap;      /* zone capacity -- presumably <= zsize; confirm units */
345         unsigned int zsize;     /* zone size */
346         unsigned int zsize_shift;       /* presumably log2(zsize) for fast division -- confirm */
347         unsigned int nr_zones;          /* total zones on the device */
348         unsigned int nr_conv_zones;     /* leading conventional zones (cf. DEF_ZBC_NR_CONV_ZONES) */
349         unsigned int nr_seq_zones;      /* sequential-write zones */
350         unsigned int nr_imp_open;       /* zones currently implicitly open */
351         unsigned int nr_exp_open;       /* zones currently explicitly open */
352         unsigned int nr_closed;         /* zones currently closed */
353         unsigned int max_open;          /* open-zone limit (cf. DEF_ZBC_MAX_OPEN_ZONES) */
354         ktime_t create_ts;      /* time since bootup that this device was created */
355         struct sdeb_zone_state *zstate; /* per-zone state array -- presumably nr_zones entries */
356
357         struct dentry *debugfs_entry;   /* per-device debugfs directory */
358         struct spinlock list_lock;      /* presumably protects inject_err_list below -- confirm */
359         struct list_head inject_err_list;       /* attached sdebug_err_inject entries */
360 };
361
362 struct sdebug_target_info {
363         bool reset_fail;        /* when set, make target reset report failure -- error injection; confirm */
364         struct dentry *debugfs_entry;   /* per-target debugfs directory */
365 };
366
367 struct sdebug_host_info {
368         struct list_head host_list;     /* node in the driver-wide list of pseudo hosts */
369         int si_idx;     /* sdeb_store_info (per host) xarray index */
370         struct Scsi_Host *shost;        /* the mid-level host this entry shadows */
371         struct device dev;      /* embedded device; see dev_to_sdebug_host() */
372         struct list_head dev_info_list; /* child sdebug_dev_info entries (via dev_list) */
373 };
374
375 /* There is an xarray of pointers to this struct's objects, one per host */
376 struct sdeb_store_info {
377         rwlock_t macc_lck;      /* for atomic media access on this store */
378         u8 *storep;             /* user data storage (ram) */
379         struct t10_pi_tuple *dif_storep; /* protection info (T10 PI tuples) */
380         void *map_storep;       /* provisioning map -- presumably thin-provisioning state; confirm */
381 };
382
383 #define dev_to_sdebug_host(d)   \
384         container_of(d, struct sdebug_host_info, dev)
385
386 #define shost_to_sdebug_host(shost)     \
387         dev_to_sdebug_host(shost->dma_dev)
388
389 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
390                       SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
391
392 struct sdebug_defer {
393         struct hrtimer hrt;     /* timer used for SDEB_DEFER_HRT completions */
394         struct execute_work ew; /* work item used for SDEB_DEFER_WQ completions */
395         ktime_t cmpl_ts;/* time since boot to complete this cmd */
396         int issuing_cpu;        /* submitting cpu; cf. sdebug_miss_cpus counter */
397         bool aborted;   /* true when blk_abort_request() already called */
398         enum sdeb_defer_type defer_t;   /* which deferral mechanism is in use */
399 };
400
401 struct sdebug_queued_cmd {
402         /* Per-command tracking object, presumably allocated from
403          * queued_cmd_cache; linked to its scsi_cmnd through host_scribble
404          * (see TO_QUEUED_CMD()/ASSIGN_QUEUED_CMD()).
405          */
406         struct sdebug_defer sd_dp;      /* deferred-completion bookkeeping */
407         struct scsi_cmnd *scmd;         /* the command being serviced */
408 };
408
409 struct sdebug_scsi_cmd {
410         spinlock_t   lock;      /* NOTE(review): presumably serializes completion vs abort for this cmd -- confirm */
411 };
412
413 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
414 static atomic_t sdebug_completions;  /* count of deferred completions */
415 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
416 static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
417 static atomic_t sdeb_inject_pending;
418 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
419
420 struct opcode_info_t {  /* per-opcode dispatch entry; see opcode_info_arr[] */
421         u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff */
422                                 /* for terminating element */
423         u8 opcode;              /* if num_attached > 0, preferred */
424         u16 sa;                 /* service action */
425         u32 flags;              /* OR-ed set of the F_* flags defined above */
426         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
427                                 /* response function; NULL if none */
428         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
429         u8 len_mask[16];        /* len_mask[0]-->cdb_len, then mask for cdb */
430                                 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
431 };
431
432 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
433 enum sdeb_opcode_index {        /* indexes into opcode_info_arr[] */
434         SDEB_I_INVALID_OPCODE = 0,
435         SDEB_I_INQUIRY = 1,
436         SDEB_I_REPORT_LUNS = 2,
437         SDEB_I_REQUEST_SENSE = 3,
438         SDEB_I_TEST_UNIT_READY = 4,
439         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
440         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
441         SDEB_I_LOG_SENSE = 7,
442         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
443         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
444         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
445         SDEB_I_START_STOP = 11,
446         SDEB_I_SERV_ACT_IN_16 = 12,     /* add ...SERV_ACT_IN_12 if needed */
447         SDEB_I_SERV_ACT_OUT_16 = 13,    /* add ...SERV_ACT_OUT_12 if needed */
448         SDEB_I_MAINT_IN = 14,
449         SDEB_I_MAINT_OUT = 15,
450         SDEB_I_VERIFY = 16,             /* VERIFY(10), VERIFY(16) */
451         SDEB_I_VARIABLE_LEN = 17,       /* READ(32), WRITE(32), WR_SCAT(32) */
452         SDEB_I_RESERVE = 18,            /* 6, 10 */
453         SDEB_I_RELEASE = 19,            /* 6, 10 */
454         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
455         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
456         SDEB_I_ATA_PT = 22,             /* 12, 16 */
457         SDEB_I_SEND_DIAG = 23,
458         SDEB_I_UNMAP = 24,
459         SDEB_I_WRITE_BUFFER = 25,
460         SDEB_I_WRITE_SAME = 26,         /* 10, 16 */
461         SDEB_I_SYNC_CACHE = 27,         /* 10, 16 */
462         SDEB_I_COMP_WRITE = 28,
463         SDEB_I_PRE_FETCH = 29,          /* 10, 16 */
464         SDEB_I_ZONE_OUT = 30,           /* 0x94+SA; includes no data xfer */
465         SDEB_I_ZONE_IN = 31,            /* 0x95+SA; all have data-in */
466         SDEB_I_LAST_ELEM_P1 = 32,       /* keep this last (previous + 1) */
467 };
468
469
/* Maps cdb[0] to a SDEB_I_* index; 0 means SDEB_I_INVALID_OPCODE (unsupported). */
470 static const unsigned char opcode_ind_arr[256] = {
471 /* 0x0; 0x0->0x1f: 6 byte cdbs */
472         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
473             0, 0, 0, 0,
474         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
475         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
476             SDEB_I_RELEASE,
477         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
478             SDEB_I_ALLOW_REMOVAL, 0,
479 /* 0x20; 0x20->0x3f: 10 byte cdbs */
480         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
481         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
482         0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
483         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
484 /* 0x40; 0x40->0x5f: 10 byte cdbs */
485         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
486         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
487         0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
488             SDEB_I_RELEASE,
489         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
490 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
491         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
492         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
493         0, SDEB_I_VARIABLE_LEN,
494 /* 0x80; 0x80->0x9f: 16 byte cdbs */
495         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
496         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
497         0, 0, 0, SDEB_I_VERIFY,
498         SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
499         SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
500         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
501 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
502         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
503              SDEB_I_MAINT_OUT, 0, 0, 0,
504         SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
505              0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
506         0, 0, 0, 0, 0, 0, 0, 0,
507         0, 0, 0, 0, 0, 0, 0, 0,
508 /* 0xc0; 0xc0->0xff: vendor specific */
509         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
510         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
511         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
512         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
513 };
514
515 /*
516  * The following "response" functions return the SCSI mid-level's 4 byte
517  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
518  * command completion, they can mask their return value with
519  * SDEG_RES_IMMED_MASK .
520  */
521 #define SDEG_RES_IMMED_MASK 0x40000000
522
523 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
524 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
525 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
526 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
527 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
528 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
529 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
530 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
531 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
532 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
533 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
534 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
535 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
536 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
537 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
538 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
539 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
540 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
541 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
542 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
543 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
544 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
545 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
546 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
547 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
548 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
549 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
550 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
551 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
552
553 static int sdebug_do_add_host(bool mk_new_store);
554 static int sdebug_add_host_helper(int per_host_idx);
555 static void sdebug_do_remove_host(bool the_end);
556 static int sdebug_add_store(void);
557 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
558 static void sdebug_erase_all_stores(bool apart_from_first);
559
560 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
561
562 /*
563  * The following are overflow arrays for cdbs that "hit" the same index in
564  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
565  * should be placed in opcode_info_arr[], the others should be placed here.
566  */
/* Overflow entry for SDEB_I_MODE_SENSE; leader is MODE SENSE(10) in opcode_info_arr. */
567 static const struct opcode_info_t msense_iarr[] = {
568         {0, 0x1a, 0, F_D_IN, NULL, NULL,        /* MODE SENSE(6) */
569             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
570 };
571
/* Overflow entry for SDEB_I_MODE_SELECT; leader is MODE SELECT(10) in opcode_info_arr. */
572 static const struct opcode_info_t mselect_iarr[] = {
573         {0, 0x15, 0, F_D_OUT, NULL, NULL,       /* MODE SELECT(6) */
574             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
575 };
576
/* Overflow entries for SDEB_I_READ; leader is READ(16) in opcode_info_arr. */
577 static const struct opcode_info_t read_iarr[] = {
578         {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
579             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
580              0, 0, 0, 0} },
581         {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
582             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
583         {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
584             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
585              0xc7, 0, 0, 0, 0} },
586 };
587
/* Overflow entries for SDEB_I_WRITE; leader is WRITE(16) in opcode_info_arr. */
588 static const struct opcode_info_t write_iarr[] = {
589         {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
590             NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
591                    0, 0, 0, 0, 0, 0} },
592         {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
593             NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
594                    0, 0, 0} },
595         {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
596             NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597                    0xbf, 0xc7, 0, 0, 0, 0} },
598 };
599
/* Overflow entry for SDEB_I_VERIFY; leader is presumably VERIFY(16) in opcode_info_arr. */
600 static const struct opcode_info_t verify_iarr[] = {
601         {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
602             NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
603                    0, 0, 0, 0, 0, 0} },
604 };
605
/* Overflow entry for SA_IN(16); leader is READ CAPACITY(16) in opcode_info_arr. */
606 static const struct opcode_info_t sa_in_16_iarr[] = {
607         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
608             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609              0xff, 0xff, 0xff, 0, 0xc7} },      /* GET LBA STATUS(16) */
610 };
611
/* Opcode 0x7f; service action lives in cdb bytes 8-9 (F_SA_HIGH). */
612 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
613         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
614             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
615                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
616         {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
617             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
618                    0, 0xff, 0xff, 0x0, 0x0} },  /* WRITE SCATTERED(32) */
619 };
620
/* Overflow entries for MAINT IN (0xa3); service action in cdb byte 1 (F_SA_LOW). */
621 static const struct opcode_info_t maint_in_iarr[] = {   /* MAINT IN */
622         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
623             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
624              0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
625         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
626             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
627              0, 0} },   /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
628 };
629
/* Overflow entry for SDEB_I_WRITE_SAME; the 10-byte form is presumably the leader. */
630 static const struct opcode_info_t write_same_iarr[] = {
631         {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
632             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
633              0xff, 0xff, 0xff, 0x3f, 0xc7} },           /* WRITE SAME(16) */
634 };
635
/* Overflow entry for SDEB_I_RESERVE ("6, 10"); the 10-byte form is presumably the leader. */
636 static const struct opcode_info_t reserve_iarr[] = {
637         {0, 0x16, 0, F_D_OUT, NULL, NULL,               /* RESERVE(6) */
638             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 };
640
/* Overflow entry for SDEB_I_RELEASE ("6, 10"); the 10-byte form is presumably the leader. */
641 static const struct opcode_info_t release_iarr[] = {
642         {0, 0x17, 0, F_D_OUT, NULL, NULL,               /* RELEASE(6) */
643             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
644 };
645
/* Overflow entry for SDEB_I_SYNC_CACHE; leader is the 10-byte form in opcode_info_arr. */
646 static const struct opcode_info_t sync_cache_iarr[] = {
647         {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
648             {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
649              0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },     /* SYNC_CACHE (16) */
650 };
651
/* Overflow entry for SDEB_I_PRE_FETCH; leader is the 10-byte form in opcode_info_arr. */
652 static const struct opcode_info_t pre_fetch_iarr[] = {
653         {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
654             {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
655              0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },     /* PRE-FETCH (16) */
656 };
657
/* Overflow entries for ZONE OUT (0x94+SA); OPEN ZONE is presumably the leader
 * in opcode_info_arr (cf. resp_open_zone) -- leader entry not visible here. */
658 static const struct opcode_info_t zone_out_iarr[] = {   /* ZONE OUT(16) */
659         {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
660             {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661              0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },      /* CLOSE ZONE */
662         {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
663             {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
664              0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },      /* FINISH ZONE */
665         {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
666             {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667              0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
668 };
669
/* Overflow entry for ZONE IN (0x95+SA). NOTE(review): pfp is NULL here while
 * resp_report_zones is declared above -- presumably the leader entry in
 * opcode_info_arr carries the handler; confirm. */
670 static const struct opcode_info_t zone_in_iarr[] = {    /* ZONE IN(16) */
671         {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
672             {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
673              0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
674 };
675
676
677 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
678  * plus the terminating elements for logic that scans this table such as
679  * REPORT SUPPORTED OPERATION CODES. */
/*
 * Command dispatch table, indexed by the SDEB_I_* values.  Each entry is
 * { num_attached, opcode, service_action, flags, pfp, arrp, len_mask[16] }:
 * "pfp" is the response function, "arrp" an optional array of attached
 * service-action variants, and len_mask[0] holds the cdb length while the
 * following bytes give the bits that may legally be set in each cdb byte.
 */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr, /* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* NOTE(review): F_D_OUT appears to be in the service-action slot and 0
	 * in the flags slot here, unlike every other entry — looks transposed;
	 * confirm against the SEND DIAGNOSTIC handling before relying on it. */
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
790
/* Backing variables for the module parameters; defaults come from the
 * DEF_* macros defined earlier in this file. */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* Logical block provisioning (thin provisioning / UNMAP) knobs. */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
/* Zoned block device emulation model selected via the zbc= parameter. */
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,
	BLK_ZONED_HM	= 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
854
/* SAM LUN addressing methods; sdebug_lun_am_i mirrors sdebug_lun_am as an
 * int for the module-parameter interface. */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
861
/* Derived capacity/geometry values and shared driver-wide state. */
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

/* Per-store backing data is kept in an xarray indexed by store index. */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* Statistics counters reported via procfs/sysfs. */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
899
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

/* Pseudo bus/driver pair so the emulated hosts appear in the device model. */
static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Pre-built scsi_cmnd result values (host byte << 16 | status byte). */
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

/* Root of this driver's debugfs hierarchy (error injection knobs live here). */
static struct dentry *sdebug_debugfs_root;
922
923 static void sdebug_err_free(struct rcu_head *head)
924 {
925         struct sdebug_err_inject *inject =
926                 container_of(head, typeof(*inject), rcu);
927
928         kfree(inject);
929 }
930
931 static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
932 {
933         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
934         struct sdebug_err_inject *err;
935
936         spin_lock(&devip->list_lock);
937         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
938                 if (err->type == new->type && err->cmd == new->cmd) {
939                         list_del_rcu(&err->list);
940                         call_rcu(&err->rcu, sdebug_err_free);
941                 }
942         }
943
944         list_add_tail_rcu(&new->list, &devip->inject_err_list);
945         spin_unlock(&devip->list_lock);
946 }
947
948 static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
949 {
950         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
951         struct sdebug_err_inject *err;
952         int type;
953         unsigned char cmd;
954
955         if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
956                 kfree(buf);
957                 return -EINVAL;
958         }
959
960         spin_lock(&devip->list_lock);
961         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
962                 if (err->type == type && err->cmd == cmd) {
963                         list_del_rcu(&err->list);
964                         call_rcu(&err->rcu, sdebug_err_free);
965                         spin_unlock(&devip->list_lock);
966                         kfree(buf);
967                         return count;
968                 }
969         }
970         spin_unlock(&devip->list_lock);
971
972         kfree(buf);
973         return -EINVAL;
974 }
975
976 static int sdebug_error_show(struct seq_file *m, void *p)
977 {
978         struct scsi_device *sdev = (struct scsi_device *)m->private;
979         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
980         struct sdebug_err_inject *err;
981
982         seq_puts(m, "Type\tCount\tCommand\n");
983
984         rcu_read_lock();
985         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
986                 switch (err->type) {
987                 case ERR_TMOUT_CMD:
988                 case ERR_ABORT_CMD_FAILED:
989                 case ERR_LUN_RESET_FAILED:
990                         seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
991                                 err->cmd);
992                 break;
993
994                 case ERR_FAIL_QUEUE_CMD:
995                         seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
996                                 err->cnt, err->cmd, err->queuecmd_ret);
997                 break;
998
999                 case ERR_FAIL_CMD:
1000                         seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1001                                 err->type, err->cnt, err->cmd,
1002                                 err->host_byte, err->driver_byte,
1003                                 err->status_byte, err->sense_key,
1004                                 err->asc, err->asq);
1005                 break;
1006                 }
1007         }
1008         rcu_read_unlock();
1009
1010         return 0;
1011 }
1012
/* debugfs open: bind the scsi_device stashed in i_private to the seq_file. */
static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}
1017
1018 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1019                 size_t count, loff_t *ppos)
1020 {
1021         char *buf;
1022         unsigned int inject_type;
1023         struct sdebug_err_inject *inject;
1024         struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1025
1026         buf = kzalloc(count + 1, GFP_KERNEL);
1027         if (!buf)
1028                 return -ENOMEM;
1029
1030         if (copy_from_user(buf, ubuf, count)) {
1031                 kfree(buf);
1032                 return -EFAULT;
1033         }
1034
1035         if (buf[0] == '-')
1036                 return sdebug_err_remove(sdev, buf, count);
1037
1038         if (sscanf(buf, "%d", &inject_type) != 1) {
1039                 kfree(buf);
1040                 return -EINVAL;
1041         }
1042
1043         inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1044         if (!inject) {
1045                 kfree(buf);
1046                 return -ENOMEM;
1047         }
1048
1049         switch (inject_type) {
1050         case ERR_TMOUT_CMD:
1051         case ERR_ABORT_CMD_FAILED:
1052         case ERR_LUN_RESET_FAILED:
1053                 if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1054                            &inject->cmd) != 3)
1055                         goto out_error;
1056         break;
1057
1058         case ERR_FAIL_QUEUE_CMD:
1059                 if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1060                            &inject->cmd, &inject->queuecmd_ret) != 4)
1061                         goto out_error;
1062         break;
1063
1064         case ERR_FAIL_CMD:
1065                 if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1066                            &inject->type, &inject->cnt, &inject->cmd,
1067                            &inject->host_byte, &inject->driver_byte,
1068                            &inject->status_byte, &inject->sense_key,
1069                            &inject->asc, &inject->asq) != 9)
1070                         goto out_error;
1071         break;
1072
1073         default:
1074                 goto out_error;
1075         break;
1076         }
1077
1078         kfree(buf);
1079         sdebug_err_add(sdev, inject);
1080
1081         return count;
1082
1083 out_error:
1084         kfree(buf);
1085         kfree(inject);
1086         return -EINVAL;
1087 }
1088
/* File operations for the per-device "error" debugfs file. */
static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
1095
1096 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1097 {
1098         struct scsi_target *starget = (struct scsi_target *)m->private;
1099         struct sdebug_target_info *targetip =
1100                 (struct sdebug_target_info *)starget->hostdata;
1101
1102         if (targetip)
1103                 seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1104
1105         return 0;
1106 }
1107
/* debugfs open: bind the scsi_target stashed in i_private to the seq_file. */
static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}
1112
1113 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1114                 const char __user *ubuf, size_t count, loff_t *ppos)
1115 {
1116         int ret;
1117         struct scsi_target *starget =
1118                 (struct scsi_target *)file->f_inode->i_private;
1119         struct sdebug_target_info *targetip =
1120                 (struct sdebug_target_info *)starget->hostdata;
1121
1122         if (targetip) {
1123                 ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1124                 return ret < 0 ? ret : count;
1125         }
1126         return -ENODEV;
1127 }
1128
/* File operations for the per-target "fail_reset" debugfs file. */
static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
1135
1136 static int sdebug_target_alloc(struct scsi_target *starget)
1137 {
1138         struct sdebug_target_info *targetip;
1139
1140         targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1141         if (!targetip)
1142                 return -ENOMEM;
1143
1144         targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1145                                 sdebug_debugfs_root);
1146
1147         debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1148                                 &sdebug_target_reset_fail_fops);
1149
1150         starget->hostdata = targetip;
1151
1152         return 0;
1153 }
1154
1155 static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
1156 {
1157         struct sdebug_target_info *targetip = data;
1158
1159         debugfs_remove(targetip->debugfs_entry);
1160         kfree(targetip);
1161 }
1162
1163 static void sdebug_target_destroy(struct scsi_target *starget)
1164 {
1165         struct sdebug_target_info *targetip;
1166
1167         targetip = (struct sdebug_target_info *)starget->hostdata;
1168         if (targetip) {
1169                 starget->hostdata = NULL;
1170                 async_schedule(sdebug_tartget_cleanup_async, targetip);
1171         }
1172 }
1173
1174 /* Only do the extra work involved in logical block provisioning if one or
1175  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1176  * real reads and writes (i.e. not skipping them for speed).
1177  */
1178 static inline bool scsi_debug_lbp(void)
1179 {
1180         return 0 == sdebug_fake_rw &&
1181                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1182 }
1183
1184 static void *lba2fake_store(struct sdeb_store_info *sip,
1185                             unsigned long long lba)
1186 {
1187         struct sdeb_store_info *lsip = sip;
1188
1189         lba = do_div(lba, sdebug_store_sectors);
1190         if (!sip || !sip->storep) {
1191                 WARN_ON_ONCE(true);
1192                 lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
1193         }
1194         return lsip->storep + lba * sdebug_sector_size;
1195 }
1196
/* Return the T10 protection-information tuple for @sector (wrapped modulo
 * the store size) within @sip's dif store. */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
1204
1205 static void sdebug_max_tgts_luns(void)
1206 {
1207         struct sdebug_host_info *sdbg_host;
1208         struct Scsi_Host *hpnt;
1209
1210         mutex_lock(&sdebug_host_list_mutex);
1211         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1212                 hpnt = sdbg_host->shost;
1213                 if ((hpnt->this_id >= 0) &&
1214                     (sdebug_num_tgts > hpnt->this_id))
1215                         hpnt->max_id = sdebug_num_tgts + 1;
1216                 else
1217                         hpnt->max_id = sdebug_num_tgts;
1218                 /* sdebug_max_luns; */
1219                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1220         }
1221         mutex_unlock(&sdebug_host_list_mutex);
1222 }
1223
/* Whether an invalid field was found in the data-out buffer or in the cdb. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes (field pointer) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	/* ASC depends on whether the offending field was in cdb or data-out */
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* SKSV: sense-key specific field valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the cdb */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		/* fixed format: sense-key specific bytes live at 15..17 */
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
1266
1267 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1268 {
1269         if (!scp->sense_buffer) {
1270                 sdev_printk(KERN_ERR, scp->device,
1271                             "%s: sense_buffer is NULL\n", __func__);
1272                 return;
1273         }
1274         memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1275
1276         scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1277
1278         if (sdebug_verbose)
1279                 sdev_printk(KERN_INFO, scp->device,
1280                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1281                             my_name, key, asc, asq);
1282 }
1283
/* Convenience wrapper: ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
1288
1289 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1290                             void __user *arg)
1291 {
1292         if (sdebug_verbose) {
1293                 if (0x1261 == cmd)
1294                         sdev_printk(KERN_INFO, dev,
1295                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
1296                 else if (0x5331 == cmd)
1297                         sdev_printk(KERN_INFO, dev,
1298                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1299                                     __func__);
1300                 else
1301                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1302                                     __func__, cmd);
1303         }
1304         return -EINVAL;
1305         /* return -ENOTTY; // correct return but upsets fdisk */
1306 }
1307
1308 static void config_cdb_len(struct scsi_device *sdev)
1309 {
1310         switch (sdebug_cdb_len) {
1311         case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1312                 sdev->use_10_for_rw = false;
1313                 sdev->use_16_for_rw = false;
1314                 sdev->use_10_for_ms = false;
1315                 break;
1316         case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1317                 sdev->use_10_for_rw = true;
1318                 sdev->use_16_for_rw = false;
1319                 sdev->use_10_for_ms = false;
1320                 break;
1321         case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1322                 sdev->use_10_for_rw = true;
1323                 sdev->use_16_for_rw = false;
1324                 sdev->use_10_for_ms = true;
1325                 break;
1326         case 16:
1327                 sdev->use_10_for_rw = false;
1328                 sdev->use_16_for_rw = true;
1329                 sdev->use_10_for_ms = true;
1330                 break;
1331         case 32: /* No knobs to suggest this so same as 16 for now */
1332                 sdev->use_10_for_rw = false;
1333                 sdev->use_16_for_rw = true;
1334                 sdev->use_10_for_ms = true;
1335                 break;
1336         default:
1337                 pr_warn("unexpected cdb_len=%d, force to 10\n",
1338                         sdebug_cdb_len);
1339                 sdev->use_10_for_rw = true;
1340                 sdev->use_16_for_rw = false;
1341                 sdev->use_10_for_ms = false;
1342                 sdebug_cdb_len = 10;
1343                 break;
1344         }
1345 }
1346
1347 static void all_config_cdb_len(void)
1348 {
1349         struct sdebug_host_info *sdbg_host;
1350         struct Scsi_Host *shost;
1351         struct scsi_device *sdev;
1352
1353         mutex_lock(&sdebug_host_list_mutex);
1354         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1355                 shost = sdbg_host->shost;
1356                 shost_for_each_device(sdev, shost) {
1357                         config_cdb_len(sdev);
1358                 }
1359         }
1360         mutex_unlock(&sdebug_host_list_mutex);
1361 }
1362
1363 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1364 {
1365         struct sdebug_host_info *sdhp = devip->sdbg_host;
1366         struct sdebug_dev_info *dp;
1367
1368         list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1369                 if ((devip->sdbg_host == dp->sdbg_host) &&
1370                     (devip->target == dp->target)) {
1371                         clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1372                 }
1373         }
1374 }
1375
/* If a unit attention is pending on @devip, build the corresponding sense
 * data into @scp, clear that UA bit and return check_condition_result;
 * return 0 when no UA is pending.  Only the lowest-numbered pending UA is
 * reported per call. */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* UA is consumed once reported */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1461
1462 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1463 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1464                                 int arr_len)
1465 {
1466         int act_len;
1467         struct scsi_data_buffer *sdb = &scp->sdb;
1468
1469         if (!sdb->length)
1470                 return 0;
1471         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1472                 return DID_ERROR << 16;
1473
1474         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1475                                       arr, arr_len);
1476         scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1477
1478         return 0;
1479 }
1480
1481 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1482  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1483  * calls, not required to write in ascending offset order. Assumes resid
1484  * set to scsi_bufflen() prior to any calls.
1485  */
1486 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1487                                   int arr_len, unsigned int off_dst)
1488 {
1489         unsigned int act_len, n;
1490         struct scsi_data_buffer *sdb = &scp->sdb;
1491         off_t skip = off_dst;
1492
1493         if (sdb->length <= off_dst)
1494                 return 0;
1495         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1496                 return DID_ERROR << 16;
1497
1498         act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1499                                        arr, arr_len, skip);
1500         pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1501                  __func__, off_dst, scsi_bufflen(scp), act_len,
1502                  scsi_get_resid(scp));
1503         n = scsi_bufflen(scp) - (off_dst + act_len);
1504         scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1505         return 0;
1506 }
1507
1508 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1509  * 'arr' or -1 if error.
1510  */
1511 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1512                                int arr_len)
1513 {
1514         if (!scsi_bufflen(scp))
1515                 return 0;
1516         if (scp->sc_data_direction != DMA_TO_DEVICE)
1517                 return -1;
1518
1519         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1520 }
1521
1522
/* INQUIRY response strings; each is space padded to its fixed field width */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1530
/*
 * Device identification VPD page (0x83). Returns number of bytes placed
 * in arr. @arr points just past the 4 byte VPD page header. Emits, in
 * order: a T10 vendor-id designator, then (when @dev_id_num >= 0) a
 * logical unit designator (locally assigned UUID when sdebug_uuid_ctl is
 * set, otherwise NAA-3 binary) and a relative target port designator,
 * then NAA-3 target port, target port group, target device designators
 * and finally a SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (excludes 4 byte header) */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	/* "naa." + the naa3_comp_a prefix in ASCII hex */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad name string to 24 bytes */
	num += 4;
	return num;
}
1618
/* Fixed payload of the Software interface identification VPD page
 * (0x84), starting at the page's 4th byte: three 6-byte EUI-48-style
 * identifiers.
 */
static unsigned char vpd84_data[] = {
	/* from 4th byte */
	0x22, 0x22, 0x22, 0x0, 0xbb, 0x0,
	0x22, 0x22, 0x22, 0x0, 0xbb, 0x1,
	0x22, 0x22, 0x22, 0x0, 0xbb, 0x2,
};

/* Software interface identification VPD page; returns payload length */
static int inquiry_vpd_84(unsigned char *arr)
{
	const int len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return len;
}
1631
/* Management network addresses VPD page (0x85). Writes two network
 * service descriptors (storage configuration URL and logging URL), each
 * NUL terminated and zero padded to a 4 byte multiple. Returns the
 * number of bytes placed in @arr.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	static const char * const urls[] = {
		"https://www.kernel.org/config",
		"http://www.kernel.org/log",
	};
	/* association/service type byte per descriptor */
	static const unsigned char assoc_svc[] = {
		0x1,	/* lu, storage config */
		0x4,	/* lu, logging */
	};
	int k, num = 0;

	for (k = 0; k < 2; k++) {
		int olen = strlen(urls[k]);
		int plen = olen + 1;	/* room for the NUL terminator */

		if (plen % 4)
			plen = ((plen / 4) + 1) * 4;	/* pad to multiple of 4 */
		arr[num++] = assoc_svc[k];
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = plen;	/* length, null terminated, padded */
		memcpy(arr + num, urls[k], olen);
		memset(arr + num + olen, 0, plen - olen);
		num += plen;
	}

	return num;
}
1666
/*
 * SCSI ports VPD page (0x88). Emits two SCSI port designation
 * descriptors (relative ports 1 and 2), each carrying an NAA-3 target
 * port identifier derived from naa3_comp_a. Returns the number of bytes
 * placed in @arr (which points past the 4 byte page header).
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);	/* reserved + initiator port tp id length (0) */
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);	/* reserved + initiator port tp id length (0) */
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1708
1709
/*
 * Canned payload for the ATA Information VPD page (0x89), starting at
 * the page's 4th byte; includes an ATA IDENTIFY DEVICE style data area
 * (copied verbatim by inquiry_vpd_89()).
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1753
1754 /* ATA Information VPD page */
1755 static int inquiry_vpd_89(unsigned char *arr)
1756 {
1757         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1758         return sizeof(vpd89_data);
1759 }
1760
1761
/*
 * Block limits VPD page (0xb0) template, starting at the page's 4th
 * byte; inquiry_vpd_b0() copies this and then overwrites most fields
 * from module parameters.
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1768
/*
 * Block limits VPD page (SBC-3). Fills @arr (which points past the 4
 * byte page header) from the vpdb0_data template, then overwrites the
 * transfer-length and logical block provisioning fields from module
 * parameters. Returns the fixed page length (0x3c).
 */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length (only when store is larger than 0x400 blocks) */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
1813
/* Block device characteristics VPD page (SBC-3). Reports a
 * non-rotating (solid state) medium. Returns the page length (0x3c).
 */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* medium rotation rate: non rotating (e.g. SSD) */
	arr[2] = 0;
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1825
1826 /* Logical block provisioning VPD page (SBC-4) */
1827 static int inquiry_vpd_b2(unsigned char *arr)
1828 {
1829         memset(arr, 0, 0x4);
1830         arr[0] = 0;                     /* threshold exponent */
1831         if (sdebug_lbpu)
1832                 arr[1] = 1 << 7;
1833         if (sdebug_lbpws)
1834                 arr[1] |= 1 << 6;
1835         if (sdebug_lbpws10)
1836                 arr[1] |= 1 << 5;
1837         if (sdebug_lbprz && scsi_debug_lbp())
1838                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1839         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1840         /* minimum_percentage=0; provisioning_type=0 (unknown) */
1841         /* threshold_percentage=0 */
1842         return 0x4;
1843 }
1844
/*
 * Zoned block device characteristics VPD page (ZBC mandatory). Fills
 * @arr (past the 4 byte page header) and returns the page length (0x3c).
 */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);	/* not reported */
	if (devip->zcap < devip->zsize) {
		/* zone capacity smaller than zone size: report constant offset */
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}
1870
#define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */

enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };

/*
 * Block limits extension VPD page (SBC-4). @arrb4 points past the 4
 * byte page header. Returns the payload length.
 */
static int inquiry_vpd_b7(unsigned char *arrb4)
{
	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
	return SDEBUG_BLE_LEN_AFTER_B4;
}
1883
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/*
 * Respond to the INQUIRY command. Handles the standard INQUIRY response
 * and, when the EVPD bit is set, dispatches to the inquiry_vpd_*()
 * helpers for the supported VPD pages. The response is built in a heap
 * buffer, then copied into the command's data-in buffer, truncated to
 * the CDB's allocation length. Returns 0, check_condition_result, or
 * (DID_REQUEUE << 16) on allocation failure.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = devip->zoned;
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		arr[1] = cmd[2];	/* echo requested page code */
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
				arr[n++] = 0xb7;  /* Block limits extension */
			}
			arr[3] = n - 4;   /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			arr[5] = 0x7;	/* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;  /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);	/* 2 byte page length */
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else if (cmd[2] == 0xb7) { /* block limits extension page */
			arr[3] = inquiry_vpd_b7(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* page length + 4 byte header, capped by CDB allocation length */
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
2041
/* See resp_iec_m_pg() for how this data is manipulated */
/* resp_requests() tests byte 2 bit 2 (TEST) and byte 3 low nibble (MRIE) */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
2045
2046 static int resp_requests(struct scsi_cmnd *scp,
2047                          struct sdebug_dev_info *devip)
2048 {
2049         unsigned char *cmd = scp->cmnd;
2050         unsigned char arr[SCSI_SENSE_BUFFERSIZE];       /* assume >= 18 bytes */
2051         bool dsense = !!(cmd[1] & 1);
2052         u32 alloc_len = cmd[4];
2053         u32 len = 18;
2054         int stopped_state = atomic_read(&devip->stopped);
2055
2056         memset(arr, 0, sizeof(arr));
2057         if (stopped_state > 0) {        /* some "pollable" data [spc6r02: 5.12.2] */
2058                 if (dsense) {
2059                         arr[0] = 0x72;
2060                         arr[1] = NOT_READY;
2061                         arr[2] = LOGICAL_UNIT_NOT_READY;
2062                         arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2063                         len = 8;
2064                 } else {
2065                         arr[0] = 0x70;
2066                         arr[2] = NOT_READY;             /* NO_SENSE in sense_key */
2067                         arr[7] = 0xa;                   /* 18 byte sense buffer */
2068                         arr[12] = LOGICAL_UNIT_NOT_READY;
2069                         arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2070                 }
2071         } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2072                 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2073                 if (dsense) {
2074                         arr[0] = 0x72;
2075                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
2076                         arr[2] = THRESHOLD_EXCEEDED;
2077                         arr[3] = 0xff;          /* Failure prediction(false) */
2078                         len = 8;
2079                 } else {
2080                         arr[0] = 0x70;
2081                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
2082                         arr[7] = 0xa;           /* 18 byte sense buffer */
2083                         arr[12] = THRESHOLD_EXCEEDED;
2084                         arr[13] = 0xff;         /* Failure prediction(false) */
2085                 }
2086         } else {        /* nothing to report */
2087                 if (dsense) {
2088                         len = 8;
2089                         memset(arr, 0, len);
2090                         arr[0] = 0x72;
2091                 } else {
2092                         memset(arr, 0, len);
2093                         arr[0] = 0x70;
2094                         arr[7] = 0xa;
2095                 }
2096         }
2097         return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2098 }
2099
/*
 * Respond to the START STOP UNIT command. Only power condition 0 is
 * supported. While devip->stopped == 2 (initial tur_ms_to_ready delay
 * still running) a START is rejected; a STOP is reported as success
 * without changing state. Returns 0, check_condition_result, or
 * SDEG_RES_IMMED_MASK when the state is unchanged or IMMED is set.
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {	/* only POWER CONDITION 0 (START_VALID) supported */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear means stop */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
2142
2143 static sector_t get_sdebug_capacity(void)
2144 {
2145         static const unsigned int gibibyte = 1073741824;
2146
2147         if (sdebug_virtual_gb > 0)
2148                 return (sector_t)sdebug_virtual_gb *
2149                         (gibibyte / sdebug_sector_size);
2150         else
2151                 return sdebug_store_sectors;
2152 }
2153
2154 #define SDEBUG_READCAP_ARR_SZ 8
2155 static int resp_readcap(struct scsi_cmnd *scp,
2156                         struct sdebug_dev_info *devip)
2157 {
2158         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2159         unsigned int capac;
2160
2161         /* following just in case virtual_gb changed */
2162         sdebug_capacity = get_sdebug_capacity();
2163         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2164         if (sdebug_capacity < 0xffffffff) {
2165                 capac = (unsigned int)sdebug_capacity - 1;
2166                 put_unaligned_be32(capac, arr + 0);
2167         } else
2168                 put_unaligned_be32(0xffffffff, arr + 0);
2169         put_unaligned_be16(sdebug_sector_size, arr + 6);
2170         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2171 }
2172
2173 #define SDEBUG_READCAP16_ARR_SZ 32
2174 static int resp_readcap16(struct scsi_cmnd *scp,
2175                           struct sdebug_dev_info *devip)
2176 {
2177         unsigned char *cmd = scp->cmnd;
2178         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2179         u32 alloc_len;
2180
2181         alloc_len = get_unaligned_be32(cmd + 10);
2182         /* following just in case virtual_gb changed */
2183         sdebug_capacity = get_sdebug_capacity();
2184         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2185         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2186         put_unaligned_be32(sdebug_sector_size, arr + 8);
2187         arr[13] = sdebug_physblk_exp & 0xf;
2188         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2189
2190         if (scsi_debug_lbp()) {
2191                 arr[14] |= 0x80; /* LBPME */
2192                 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2193                  * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2194                  * in the wider field maps to 0 in this field.
2195                  */
2196                 if (sdebug_lbprz & 1)   /* precisely what the draft requires */
2197                         arr[14] |= 0x40;
2198         }
2199
2200         /*
2201          * Since the scsi_debug READ CAPACITY implementation always reports the
2202          * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2203          */
2204         if (devip->zoned)
2205                 arr[12] |= 1 << 4;
2206
2207         arr[15] = sdebug_lowest_aligned & 0xff;
2208
2209         if (have_dif_prot) {
2210                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2211                 arr[12] |= 1; /* PROT_EN */
2212         }
2213
2214         return fill_from_dev_buffer(scp, arr,
2215                             min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2216 }
2217
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/*
 * REPORT TARGET PORT GROUPS. Builds two one-port target port group
 * descriptors (matching the two relative ports advertised by VPD page
 * 0x88); the group containing port B is reported as unavailable.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;	/* heap response buffer, freed before return */
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;	/* let the midlayer retry */
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;	/* descriptors start after the 4-byte length header */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes the 4-byte header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
2296
/*
 * REPORT SUPPORTED OPERATION CODES. Reporting option 0 lists every
 * supported CDB; options 1-3 describe one opcode (optionally qualified
 * by its service action). When RCTD is set in the CDB, a command
 * timeouts descriptor is appended to each entry.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;	/* heap response buffer, freed on every exit path */
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeouts descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* cap the working buffer size */
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0: /* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* 8 bytes per descriptor, plus 12 more when RCTD is set */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* also emit every command attached to this opcode */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore iterator after inner walk */
			offset += bump;
		}
		break;
	case 1: /* one command: opcode only */
	case 2: /* one command: opcode plus service action */
	case 3: /* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported as specified */
			else if (0 == (FF_SA & oip->flags)) {
				/* search the attached commands by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search the attached commands by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* copy the CDB usage bitmap for this command */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2447
2448 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2449                           struct sdebug_dev_info *devip)
2450 {
2451         bool repd;
2452         u32 alloc_len, len;
2453         u8 arr[16];
2454         u8 *cmd = scp->cmnd;
2455
2456         memset(arr, 0, sizeof(arr));
2457         repd = !!(cmd[2] & 0x80);
2458         alloc_len = get_unaligned_be32(cmd + 6);
2459         if (alloc_len < 4) {
2460                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2461                 return check_condition_result;
2462         }
2463         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
2464         arr[1] = 0x1;           /* ITNRS */
2465         if (repd) {
2466                 arr[3] = 0xc;
2467                 len = 16;
2468         } else
2469                 len = 4;
2470
2471         len = (len < alloc_len) ? len : alloc_len;
2472         return fill_from_dev_buffer(scp, arr, len);
2473 }
2474
2475 /* <<Following mode page info copied from ST318451LW>> */
2476
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page (0x01) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: report none */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
2487
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page (0x02) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10,
						      0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: report none */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2498
2499 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2500 {       /* Format device page for mode_sense */
2501         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2502                                      0, 0, 0, 0, 0, 0, 0, 0,
2503                                      0, 0, 0, 0, 0x40, 0, 0, 0};
2504
2505         memcpy(p, format_pg, sizeof(format_pg));
2506         put_unaligned_be16(sdebug_sectors_per, p + 10);
2507         put_unaligned_be16(sdebug_sector_size, p + 12);
2508         if (sdebug_removable)
2509                 p[20] |= 0x20; /* should agree with INQUIRY */
2510         if (1 == pcontrol)
2511                 memset(p + 2, 0, sizeof(format_pg) - 2);
2512         return sizeof(format_pg);
2513 }
2514
2515 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2516                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2517                                      0, 0, 0, 0};
2518
2519 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2520 {       /* Caching page for mode_sense */
2521         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2522                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2523         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2524                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2525
2526         if (SDEBUG_OPT_N_WCE & sdebug_opts)
2527                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
2528         memcpy(p, caching_pg, sizeof(caching_pg));
2529         if (1 == pcontrol)
2530                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2531         else if (2 == pcontrol)
2532                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2533         return sizeof(caching_pg);
2534 }
2535
2536 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2537                                     0, 0, 0x2, 0x4b};
2538
2539 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2540 {       /* Control mode page for mode_sense */
2541         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2542                                         0, 0, 0, 0};
2543         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2544                                      0, 0, 0x2, 0x4b};
2545
2546         if (sdebug_dsense)
2547                 ctrl_m_pg[2] |= 0x4;
2548         else
2549                 ctrl_m_pg[2] &= ~0x4;
2550
2551         if (sdebug_ato)
2552                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2553
2554         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2555         if (1 == pcontrol)
2556                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2557         else if (2 == pcontrol)
2558                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2559         return sizeof(ctrl_m_pg);
2560 }
2561
2562
2563 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2564 {       /* Informational Exceptions control mode page for mode_sense */
2565         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2566                                        0, 0, 0x0, 0x0};
2567         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2568                                       0, 0, 0x0, 0x0};
2569
2570         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2571         if (1 == pcontrol)
2572                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2573         else if (2 == pcontrol)
2574                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2575         return sizeof(iec_m_pg);
2576 }
2577
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {0x19, 0x6, 0x6, 0x0,
						    0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: report none */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2588
2589
/*
 * SAS Phy Control And Discover mode subpage (0x19/0x01) for MODE SENSE.
 * The two phy descriptors get their SAS addresses patched into the
 * template before it is copied out; the attached-phy identifiers are
 * derived from target_dev_id.
 */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* patch the per-phy SAS addresses into the template */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)	/* changeable values: report none */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2622
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0,
						     0x6, 0x10, 0, 0, 0, 0, 0,
						     0, 0, 0, 0};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: report none */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2634
2635 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2636 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2637
2638 static int resp_mode_sense(struct scsi_cmnd *scp,
2639                            struct sdebug_dev_info *devip)
2640 {
2641         int pcontrol, pcode, subpcode, bd_len;
2642         unsigned char dev_spec;
2643         u32 alloc_len, offset, len;
2644         int target_dev_id;
2645         int target = scp->device->id;
2646         unsigned char *ap;
2647         unsigned char *arr __free(kfree);
2648         unsigned char *cmd = scp->cmnd;
2649         bool dbd, llbaa, msense_6, is_disk, is_zbc;
2650
2651         arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2652         if (!arr)
2653                 return -ENOMEM;
2654         dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
2655         pcontrol = (cmd[2] & 0xc0) >> 6;
2656         pcode = cmd[2] & 0x3f;
2657         subpcode = cmd[3];
2658         msense_6 = (MODE_SENSE == cmd[0]);
2659         llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2660         is_disk = (sdebug_ptype == TYPE_DISK);
2661         is_zbc = devip->zoned;
2662         if ((is_disk || is_zbc) && !dbd)
2663                 bd_len = llbaa ? 16 : 8;
2664         else
2665                 bd_len = 0;
2666         alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2667         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2668         if (0x3 == pcontrol) {  /* Saving values not supported */
2669                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2670                 return check_condition_result;
2671         }
2672         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2673                         (devip->target * 1000) - 3;
2674         /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2675         if (is_disk || is_zbc) {
2676                 dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
2677                 if (sdebug_wp)
2678                         dev_spec |= 0x80;
2679         } else
2680                 dev_spec = 0x0;
2681         if (msense_6) {
2682                 arr[2] = dev_spec;
2683                 arr[3] = bd_len;
2684                 offset = 4;
2685         } else {
2686                 arr[3] = dev_spec;
2687                 if (16 == bd_len)
2688                         arr[4] = 0x1;   /* set LONGLBA bit */
2689                 arr[7] = bd_len;        /* assume 255 or less */
2690                 offset = 8;
2691         }
2692         ap = arr + offset;
2693         if ((bd_len > 0) && (!sdebug_capacity))
2694                 sdebug_capacity = get_sdebug_capacity();
2695
2696         if (8 == bd_len) {
2697                 if (sdebug_capacity > 0xfffffffe)
2698                         put_unaligned_be32(0xffffffff, ap + 0);
2699                 else
2700                         put_unaligned_be32(sdebug_capacity, ap + 0);
2701                 put_unaligned_be16(sdebug_sector_size, ap + 6);
2702                 offset += bd_len;
2703                 ap = arr + offset;
2704         } else if (16 == bd_len) {
2705                 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2706                 put_unaligned_be32(sdebug_sector_size, ap + 12);
2707                 offset += bd_len;
2708                 ap = arr + offset;
2709         }
2710
2711         switch (pcode) {
2712         case 0x1:       /* Read-Write error recovery page, direct access */
2713                 if (subpcode > 0x0 && subpcode < 0xff)
2714                         goto bad_subpcode;
2715                 len = resp_err_recov_pg(ap, pcontrol, target);
2716                 offset += len;
2717                 break;
2718         case 0x2:       /* Disconnect-Reconnect page, all devices */
2719                 if (subpcode > 0x0 && subpcode < 0xff)
2720                         goto bad_subpcode;
2721                 len = resp_disconnect_pg(ap, pcontrol, target);
2722                 offset += len;
2723                 break;
2724         case 0x3:       /* Format device page, direct access */
2725                 if (subpcode > 0x0 && subpcode < 0xff)
2726                         goto bad_subpcode;
2727                 if (is_disk) {
2728                         len = resp_format_pg(ap, pcontrol, target);
2729                         offset += len;
2730                 } else {
2731                         goto bad_pcode;
2732                 }
2733                 break;
2734         case 0x8:       /* Caching page, direct access */
2735                 if (subpcode > 0x0 && subpcode < 0xff)
2736                         goto bad_subpcode;
2737                 if (is_disk || is_zbc) {
2738                         len = resp_caching_pg(ap, pcontrol, target);
2739                         offset += len;
2740                 } else {
2741                         goto bad_pcode;
2742                 }
2743                 break;
2744         case 0xa:       /* Control Mode page, all devices */
2745                 if (subpcode > 0x0 && subpcode < 0xff)
2746                         goto bad_subpcode;
2747                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2748                 offset += len;
2749                 break;
2750         case 0x19:      /* if spc==1 then sas phy, control+discover */
2751                 if (subpcode > 0x2 && subpcode < 0xff)
2752                         goto bad_subpcode;
2753                 len = 0;
2754                 if ((0x0 == subpcode) || (0xff == subpcode))
2755                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2756                 if ((0x1 == subpcode) || (0xff == subpcode))
2757                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2758                                                   target_dev_id);
2759                 if ((0x2 == subpcode) || (0xff == subpcode))
2760                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2761                 offset += len;
2762                 break;
2763         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2764                 if (subpcode > 0x0 && subpcode < 0xff)
2765                         goto bad_subpcode;
2766                 len = resp_iec_m_pg(ap, pcontrol, target);
2767                 offset += len;
2768                 break;
2769         case 0x3f:      /* Read all Mode pages */
2770                 if (subpcode > 0x0 && subpcode < 0xff)
2771                         goto bad_subpcode;
2772                 len = resp_err_recov_pg(ap, pcontrol, target);
2773                 len += resp_disconnect_pg(ap + len, pcontrol, target);
2774                 if (is_disk) {
2775                         len += resp_format_pg(ap + len, pcontrol, target);
2776                         len += resp_caching_pg(ap + len, pcontrol, target);
2777                 } else if (is_zbc) {
2778                         len += resp_caching_pg(ap + len, pcontrol, target);
2779                 }
2780                 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2781                 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2782                 if (0xff == subpcode) {
2783                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2784                                                   target_dev_id);
2785                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2786                 }
2787                 len += resp_iec_m_pg(ap + len, pcontrol, target);
2788                 offset += len;
2789                 break;
2790         default:
2791                 goto bad_pcode;
2792         }
2793         if (msense_6)
2794                 arr[0] = offset - 1;
2795         else
2796                 put_unaligned_be16((offset - 2), arr + 0);
2797         return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2798
2799 bad_pcode:
2800         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2801         return check_condition_result;
2802
2803 bad_subpcode:
2804         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2805         return check_condition_result;
2806 }
2807
2808 #define SDEBUG_MAX_MSELECT_SZ 512
2809
/* Respond to MODE SELECT (6 or 10 byte cdbs). Only the Caching (0x8),
 * Control (0xa) and Informational Exceptions (0x1c) mode pages may be
 * changed; a successful change raises the MODE PARAMETERS CHANGED unit
 * attention on the device. */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* Page Format: pages follow block descriptors */
	sp = cmd[1] & 0x1;	/* Save Pages: saving is not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	/* Require PF=1, SP=0, and a parameter list that fits in arr */
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	/* Parse the mode parameter header; MODE DATA LENGTH is reserved
	 * (must be 0) in MODE SELECT parameter data */
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);	/* offset of first mode page */
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;	/* page code */
	ps = !!(arr[off] & 0x80);	/* Parameters Saveable: must be 0 */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SubPage Format bit */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	/* The page must lie entirely within the parameter list */
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		/* Accept only if the page length matches our template */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* SWP bit drives the software write-protect state */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			/* D_SENSE selects descriptor format sense data */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2891
/* Build the Temperature log page (0xd) payload in arr; returns its size. */
static int resp_temp_l_pg(unsigned char *arr)
{
	/* Two 6-byte log parameters: current temperature (38 C) and
	 * reference temperature (65 C). */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* parameter 0x0000 */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* parameter 0x0001 */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2901
2902 static int resp_ie_l_pg(unsigned char *arr)
2903 {
2904         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2905                 };
2906
2907         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2908         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2909                 arr[4] = THRESHOLD_EXCEEDED;
2910                 arr[5] = 0xff;
2911         }
2912         return sizeof(ie_l_pg);
2913 }
2914
/* Build the Environment Reporting log subpage (0xd/0x1) in arr; returns
 * its size. */
static int resp_env_rep_l_spg(unsigned char *arr)
{
	/* Two 12-byte parameters: a temperature report followed by a
	 * relative humidity report. */
	static const unsigned char env_rep_l_spg[] = {
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,	/* temperature */
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,		/* rel. humidity */
	};

	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
	return sizeof(env_rep_l_spg);
}
2926
2927 #define SDEBUG_MAX_LSENSE_SZ 512
2928
2929 static int resp_log_sense(struct scsi_cmnd *scp,
2930                           struct sdebug_dev_info *devip)
2931 {
2932         int ppc, sp, pcode, subpcode;
2933         u32 alloc_len, len, n;
2934         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2935         unsigned char *cmd = scp->cmnd;
2936
2937         memset(arr, 0, sizeof(arr));
2938         ppc = cmd[1] & 0x2;
2939         sp = cmd[1] & 0x1;
2940         if (ppc || sp) {
2941                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2942                 return check_condition_result;
2943         }
2944         pcode = cmd[2] & 0x3f;
2945         subpcode = cmd[3] & 0xff;
2946         alloc_len = get_unaligned_be16(cmd + 7);
2947         arr[0] = pcode;
2948         if (0 == subpcode) {
2949                 switch (pcode) {
2950                 case 0x0:       /* Supported log pages log page */
2951                         n = 4;
2952                         arr[n++] = 0x0;         /* this page */
2953                         arr[n++] = 0xd;         /* Temperature */
2954                         arr[n++] = 0x2f;        /* Informational exceptions */
2955                         arr[3] = n - 4;
2956                         break;
2957                 case 0xd:       /* Temperature log page */
2958                         arr[3] = resp_temp_l_pg(arr + 4);
2959                         break;
2960                 case 0x2f:      /* Informational exceptions log page */
2961                         arr[3] = resp_ie_l_pg(arr + 4);
2962                         break;
2963                 default:
2964                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2965                         return check_condition_result;
2966                 }
2967         } else if (0xff == subpcode) {
2968                 arr[0] |= 0x40;
2969                 arr[1] = subpcode;
2970                 switch (pcode) {
2971                 case 0x0:       /* Supported log pages and subpages log page */
2972                         n = 4;
2973                         arr[n++] = 0x0;
2974                         arr[n++] = 0x0;         /* 0,0 page */
2975                         arr[n++] = 0x0;
2976                         arr[n++] = 0xff;        /* this page */
2977                         arr[n++] = 0xd;
2978                         arr[n++] = 0x0;         /* Temperature */
2979                         arr[n++] = 0xd;
2980                         arr[n++] = 0x1;         /* Environment reporting */
2981                         arr[n++] = 0xd;
2982                         arr[n++] = 0xff;        /* all 0xd subpages */
2983                         arr[n++] = 0x2f;
2984                         arr[n++] = 0x0; /* Informational exceptions */
2985                         arr[n++] = 0x2f;
2986                         arr[n++] = 0xff;        /* all 0x2f subpages */
2987                         arr[3] = n - 4;
2988                         break;
2989                 case 0xd:       /* Temperature subpages */
2990                         n = 4;
2991                         arr[n++] = 0xd;
2992                         arr[n++] = 0x0;         /* Temperature */
2993                         arr[n++] = 0xd;
2994                         arr[n++] = 0x1;         /* Environment reporting */
2995                         arr[n++] = 0xd;
2996                         arr[n++] = 0xff;        /* these subpages */
2997                         arr[3] = n - 4;
2998                         break;
2999                 case 0x2f:      /* Informational exceptions subpages */
3000                         n = 4;
3001                         arr[n++] = 0x2f;
3002                         arr[n++] = 0x0;         /* Informational exceptions */
3003                         arr[n++] = 0x2f;
3004                         arr[n++] = 0xff;        /* these subpages */
3005                         arr[3] = n - 4;
3006                         break;
3007                 default:
3008                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3009                         return check_condition_result;
3010                 }
3011         } else if (subpcode > 0) {
3012                 arr[0] |= 0x40;
3013                 arr[1] = subpcode;
3014                 if (pcode == 0xd && subpcode == 1)
3015                         arr[3] = resp_env_rep_l_spg(arr + 4);
3016                 else {
3017                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3018                         return check_condition_result;
3019                 }
3020         } else {
3021                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3022                 return check_condition_result;
3023         }
3024         len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3025         return fill_from_dev_buffer(scp, arr,
3026                     min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3027 }
3028
3029 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3030 {
3031         return devip->nr_zones != 0;
3032 }
3033
/* Return the zone state entry for the zone containing lba, accounting
 * for the interleaved gap zones that exist when the zone capacity is
 * smaller than the zone size. */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	/* No gap zones (zcap == zsize) and conventional zones map 1:1 */
	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	/* lba may land in the gap zone that follows the sequential zone */
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
3055
3056 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3057 {
3058         return zsp->z_type == ZBC_ZTYPE_CNV;
3059 }
3060
3061 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3062 {
3063         return zsp->z_type == ZBC_ZTYPE_GAP;
3064 }
3065
/* True if zsp describes a sequential zone, i.e. anything that is neither
 * conventional nor a gap zone. */
static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{
	return !(zbc_zone_is_conv(zsp) || zbc_zone_is_gap(zsp));
}
3070
3071 static void zbc_close_zone(struct sdebug_dev_info *devip,
3072                            struct sdeb_zone_state *zsp)
3073 {
3074         enum sdebug_z_cond zc;
3075
3076         if (!zbc_zone_is_seq(zsp))
3077                 return;
3078
3079         zc = zsp->z_cond;
3080         if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3081                 return;
3082
3083         if (zc == ZC2_IMPLICIT_OPEN)
3084                 devip->nr_imp_open--;
3085         else
3086                 devip->nr_exp_open--;
3087
3088         if (zsp->z_wp == zsp->z_start) {
3089                 zsp->z_cond = ZC1_EMPTY;
3090         } else {
3091                 zsp->z_cond = ZC4_CLOSED;
3092                 devip->nr_closed++;
3093         }
3094 }
3095
3096 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3097 {
3098         struct sdeb_zone_state *zsp = &devip->zstate[0];
3099         unsigned int i;
3100
3101         for (i = 0; i < devip->nr_zones; i++, zsp++) {
3102                 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3103                         zbc_close_zone(devip, zsp);
3104                         return;
3105                 }
3106         }
3107 }
3108
/*
 * Transition a sequential zone to the explicitly (explicit == true) or
 * implicitly open condition, closing an implicitly open zone first when
 * the device's max_open limit would otherwise be exceeded.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	/* Already open in the requested mode: nothing to do */
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
3139
3140 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3141                                      struct sdeb_zone_state *zsp)
3142 {
3143         switch (zsp->z_cond) {
3144         case ZC2_IMPLICIT_OPEN:
3145                 devip->nr_imp_open--;
3146                 break;
3147         case ZC3_EXPLICIT_OPEN:
3148                 devip->nr_exp_open--;
3149                 break;
3150         default:
3151                 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3152                           zsp->z_start, zsp->z_cond);
3153                 break;
3154         }
3155         zsp->z_cond = ZC5_FULL;
3156 }
3157
/*
 * Advance the write pointer(s) for a write of num blocks starting at lba.
 * Sequential-write-required (SWR) zones are always written at the WP, so
 * a single increment suffices; other sequential zones may be written
 * anywhere, so the affected zones are walked one at a time.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		/* Writing away from the WP uses a non-sequential resource */
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Write reaches the zone end: continue in the next zone */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* Entirely below the WP: the WP does not move */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
3199
/*
 * Check zoned-device constraints for a read or write of num blocks at
 * lba. Returns 0 when the access is allowed, else check_condition_result
 * with the appropriate sense data already set. A write to an empty or
 * closed zone implicitly opens it.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
3274
3275 static inline int check_device_access_params
3276                         (struct scsi_cmnd *scp, unsigned long long lba,
3277                          unsigned int num, bool write)
3278 {
3279         struct scsi_device *sdp = scp->device;
3280         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3281
3282         if (lba + num > sdebug_capacity) {
3283                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3284                 return check_condition_result;
3285         }
3286         /* transfer length excessive (tie in to block limits VPD page) */
3287         if (num > sdebug_store_sectors) {
3288                 /* needs work to find which cdb byte 'num' comes from */
3289                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3290                 return check_condition_result;
3291         }
3292         if (write && unlikely(sdebug_wp)) {
3293                 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3294                 return check_condition_result;
3295         }
3296         if (sdebug_dev_is_zoned(devip))
3297                 return check_zbc_access_params(scp, lba, num, write);
3298
3299         return 0;
3300 }
3301
3302 /*
3303  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3304  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3305  * that access any of the "stores" in struct sdeb_store_info should call this
3306  * function with bug_if_fake_rw set to true.
3307  */
3308 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3309                                                 bool bug_if_fake_rw)
3310 {
3311         if (sdebug_fake_rw) {
3312                 BUG_ON(bug_if_fake_rw); /* See note above */
3313                 return NULL;
3314         }
3315         return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3316 }
3317
/* Copy num blocks starting at lba between the command's scatter-gather
 * list (skipping sg_skip bytes) and the backing store. The store is
 * treated as circular, so the copy may wrap to its start.
 * Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;	/* rest: blocks wrapping past the store end */
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* Nothing to transfer (e.g. zero-length command or fake_rw mode) */
	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* do_div() divides lba in place; block is lba modulo the store size */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* Copy the wrapped part from/to the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
3360
3361 /* Returns number of bytes copied or -1 if error. */
3362 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3363 {
3364         struct scsi_data_buffer *sdb = &scp->sdb;
3365
3366         if (!sdb->length)
3367                 return 0;
3368         if (scp->sc_data_direction != DMA_TO_DEVICE)
3369                 return -1;
3370         return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3371                               num * sdebug_sector_size, 0, true);
3372 }
3373
3374 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3375  * arr into sip->storep+lba and return true. If comparison fails then
3376  * return false. */
3377 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3378                               const u8 *arr, bool compare_only)
3379 {
3380         bool res;
3381         u64 block, rest = 0;
3382         u32 store_blks = sdebug_store_sectors;
3383         u32 lb_size = sdebug_sector_size;
3384         u8 *fsp = sip->storep;
3385
3386         block = do_div(lba, store_blks);
3387         if (block + num > store_blks)
3388                 rest = block + num - store_blks;
3389
3390         res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3391         if (!res)
3392                 return res;
3393         if (rest)
3394                 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3395                              rest * lb_size);
3396         if (!res)
3397                 return res;
3398         if (compare_only)
3399                 return true;
3400         arr += num * lb_size;
3401         memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3402         if (rest)
3403                 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3404         return res;
3405 }
3406
3407 static __be16 dif_compute_csum(const void *buf, int len)
3408 {
3409         __be16 csum;
3410
3411         if (sdebug_guard)
3412                 csum = (__force __be16)ip_compute_csum(buf, len);
3413         else
3414                 csum = cpu_to_be16(crc_t10dif(buf, len));
3415
3416         return csum;
3417 }
3418
3419 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3420                       sector_t sector, u32 ei_lba)
3421 {
3422         __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3423
3424         if (sdt->guard_tag != csum) {
3425                 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3426                         (unsigned long)sector,
3427                         be16_to_cpu(sdt->guard_tag),
3428                         be16_to_cpu(csum));
3429                 return 0x01;
3430         }
3431         if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3432             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3433                 pr_err("REF check failed on sector %lu\n",
3434                         (unsigned long)sector);
3435                 return 0x03;
3436         }
3437         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3438             be32_to_cpu(sdt->ref_tag) != ei_lba) {
3439                 pr_err("REF check failed on sector %lu\n",
3440                         (unsigned long)sector);
3441                 return 0x03;
3442         }
3443         return 0;
3444 }
3445
/*
 * Copy protection information tuples for sectors starting at sector
 * between the command's protection scatter-gather list and the dif
 * store: into the sgl when read is true, out of it otherwise. The dif
 * store is circular, so a copy may wrap to its start.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;	/* bytes wrapping past the store end */

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* The wrapped part lives at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3491
/*
 * Verify protection information for sectors starting at start_sec, then
 * copy the PI tuples to the command's protection sgl. Returns 0 on
 * success or the dif_verify() error code (0x01 guard, 0x03 ref tag) of
 * the first failing sector.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* An app tag of 0xffff means "do not check this sector" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
3531
3532 static inline void
3533 sdeb_read_lock(struct sdeb_store_info *sip)
3534 {
3535         if (sdebug_no_rwlock) {
3536                 if (sip)
3537                         __acquire(&sip->macc_lck);
3538                 else
3539                         __acquire(&sdeb_fake_rw_lck);
3540         } else {
3541                 if (sip)
3542                         read_lock(&sip->macc_lck);
3543                 else
3544                         read_lock(&sdeb_fake_rw_lck);
3545         }
3546 }
3547
3548 static inline void
3549 sdeb_read_unlock(struct sdeb_store_info *sip)
3550 {
3551         if (sdebug_no_rwlock) {
3552                 if (sip)
3553                         __release(&sip->macc_lck);
3554                 else
3555                         __release(&sdeb_fake_rw_lck);
3556         } else {
3557                 if (sip)
3558                         read_unlock(&sip->macc_lck);
3559                 else
3560                         read_unlock(&sdeb_fake_rw_lck);
3561         }
3562 }
3563
3564 static inline void
3565 sdeb_write_lock(struct sdeb_store_info *sip)
3566 {
3567         if (sdebug_no_rwlock) {
3568                 if (sip)
3569                         __acquire(&sip->macc_lck);
3570                 else
3571                         __acquire(&sdeb_fake_rw_lck);
3572         } else {
3573                 if (sip)
3574                         write_lock(&sip->macc_lck);
3575                 else
3576                         write_lock(&sdeb_fake_rw_lck);
3577         }
3578 }
3579
3580 static inline void
3581 sdeb_write_unlock(struct sdeb_store_info *sip)
3582 {
3583         if (sdebug_no_rwlock) {
3584                 if (sip)
3585                         __release(&sip->macc_lck);
3586                 else
3587                         __release(&sdeb_fake_rw_lck);
3588         } else {
3589                 if (sip)
3590                         write_unlock(&sip->macc_lck);
3591                 else
3592                         write_unlock(&sdeb_fake_rw_lck);
3593         }
3594 }
3595
/*
 * Handle SCSI READ(6/10/12/16), XDWRITEREAD(10) and (default case)
 * READ(32) commands: decode the CDB, apply protection-information and
 * error-injection policy, then copy data from the fake store via
 * do_device_access(). Returns 0 on success, a check-condition result,
 * or a transport error (DID_ERROR << 16).
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;		/* number of logical blocks to read */
	u32 ei_lba;		/* expected initial LBA for PI ref-tag checks */
	int ret;
	u64 lba;		/* starting logical block address */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	/* Decode LBA, transfer length and PI expectations from the CDB */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* for type 2 protection a non-zero RDPROTECT is rejected */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* SDEBUG_OPT_SHORT_TRANSFER injection: simulate a short read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Simulated medium error if the range overlaps the configured span */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	sdeb_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Optionally inject a recovered/DIF/DIX error after a good read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3738
/*
 * Verify the T10 protection-information tuples supplied with a protected
 * WRITE against the data-out buffer, sector by sector, then copy the
 * tuples into the dif store via dif_copy_prot(). Returns 0 on success or
 * the dif_verify() error code on mismatch (1 and 3 are mapped to guard
 * and reference tag errors by the callers).
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;	/* walks the data sg list */
	struct sg_mapping_iter piter;	/* walks the protection sg list */

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			/* skip verification when WRPROTECT == 3 */
			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all tuples verified: mirror them into the dif store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3810
3811 static unsigned long lba_to_map_index(sector_t lba)
3812 {
3813         if (sdebug_unmap_alignment)
3814                 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3815         sector_div(lba, sdebug_unmap_granularity);
3816         return lba;
3817 }
3818
3819 static sector_t map_index_to_lba(unsigned long index)
3820 {
3821         sector_t lba = index * sdebug_unmap_granularity;
3822
3823         if (sdebug_unmap_alignment)
3824                 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3825         return lba;
3826 }
3827
3828 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3829                               unsigned int *num)
3830 {
3831         sector_t end;
3832         unsigned int mapped;
3833         unsigned long index;
3834         unsigned long next;
3835
3836         index = lba_to_map_index(lba);
3837         mapped = test_bit(index, sip->map_storep);
3838
3839         if (mapped)
3840                 next = find_next_zero_bit(sip->map_storep, map_size, index);
3841         else
3842                 next = find_next_bit(sip->map_storep, map_size, index);
3843
3844         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3845         *num = end - lba;
3846         return mapped;
3847 }
3848
3849 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3850                        unsigned int len)
3851 {
3852         sector_t end = lba + len;
3853
3854         while (lba < end) {
3855                 unsigned long index = lba_to_map_index(lba);
3856
3857                 if (index < map_size)
3858                         set_bit(index, sip->map_storep);
3859
3860                 lba = map_index_to_lba(index + 1);
3861         }
3862 }
3863
/*
 * Clear the mapped bit of every provisioning granule that lies wholly
 * inside [lba, lba + len); granules only partially covered are left
 * mapped. For each unmapped granule the backing store is overwritten
 * (zeroes when sdebug_lbprz is odd, 0xff bytes otherwise) and any
 * protection tuples are filled with 0xff.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only granules fully inside the range are unmapped */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				/* invalidate PI for the granule with 0xff */
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3892
3893 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3894 {
3895         bool check_prot;
3896         u32 num;
3897         u32 ei_lba;
3898         int ret;
3899         u64 lba;
3900         struct sdeb_store_info *sip = devip2sip(devip, true);
3901         u8 *cmd = scp->cmnd;
3902
3903         switch (cmd[0]) {
3904         case WRITE_16:
3905                 ei_lba = 0;
3906                 lba = get_unaligned_be64(cmd + 2);
3907                 num = get_unaligned_be32(cmd + 10);
3908                 check_prot = true;
3909                 break;
3910         case WRITE_10:
3911                 ei_lba = 0;
3912                 lba = get_unaligned_be32(cmd + 2);
3913                 num = get_unaligned_be16(cmd + 7);
3914                 check_prot = true;
3915                 break;
3916         case WRITE_6:
3917                 ei_lba = 0;
3918                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3919                       (u32)(cmd[1] & 0x1f) << 16;
3920                 num = (0 == cmd[4]) ? 256 : cmd[4];
3921                 check_prot = true;
3922                 break;
3923         case WRITE_12:
3924                 ei_lba = 0;
3925                 lba = get_unaligned_be32(cmd + 2);
3926                 num = get_unaligned_be32(cmd + 6);
3927                 check_prot = true;
3928                 break;
3929         case 0x53:      /* XDWRITEREAD(10) */
3930                 ei_lba = 0;
3931                 lba = get_unaligned_be32(cmd + 2);
3932                 num = get_unaligned_be16(cmd + 7);
3933                 check_prot = false;
3934                 break;
3935         default:        /* assume WRITE(32) */
3936                 lba = get_unaligned_be64(cmd + 12);
3937                 ei_lba = get_unaligned_be32(cmd + 20);
3938                 num = get_unaligned_be32(cmd + 28);
3939                 check_prot = false;
3940                 break;
3941         }
3942         if (unlikely(have_dif_prot && check_prot)) {
3943                 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3944                     (cmd[1] & 0xe0)) {
3945                         mk_sense_invalid_opcode(scp);
3946                         return check_condition_result;
3947                 }
3948                 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3949                      sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3950                     (cmd[1] & 0xe0) == 0)
3951                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3952                                     "to DIF device\n");
3953         }
3954
3955         sdeb_write_lock(sip);
3956         ret = check_device_access_params(scp, lba, num, true);
3957         if (ret) {
3958                 sdeb_write_unlock(sip);
3959                 return ret;
3960         }
3961
3962         /* DIX + T10 DIF */
3963         if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3964                 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3965                 case 1: /* Guard tag error */
3966                         if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3967                                 sdeb_write_unlock(sip);
3968                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3969                                 return illegal_condition_result;
3970                         } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3971                                 sdeb_write_unlock(sip);
3972                                 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3973                                 return check_condition_result;
3974                         }
3975                         break;
3976                 case 3: /* Reference tag error */
3977                         if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3978                                 sdeb_write_unlock(sip);
3979                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3980                                 return illegal_condition_result;
3981                         } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3982                                 sdeb_write_unlock(sip);
3983                                 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3984                                 return check_condition_result;
3985                         }
3986                         break;
3987                 }
3988         }
3989
3990         ret = do_device_access(sip, scp, 0, lba, num, true);
3991         if (unlikely(scsi_debug_lbp()))
3992                 map_region(sip, lba, num);
3993         /* If ZBC zone then bump its write pointer */
3994         if (sdebug_dev_is_zoned(devip))
3995                 zbc_inc_wp(devip, lba, num);
3996         sdeb_write_unlock(sip);
3997         if (unlikely(-1 == ret))
3998                 return DID_ERROR << 16;
3999         else if (unlikely(sdebug_verbose &&
4000                           (ret < (num * sdebug_sector_size))))
4001                 sdev_printk(KERN_INFO, scp->device,
4002                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4003                             my_name, num * sdebug_sector_size, ret);
4004
4005         if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4006                      atomic_read(&sdeb_inject_pending))) {
4007                 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4008                         mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4009                         atomic_set(&sdeb_inject_pending, 0);
4010                         return check_condition_result;
4011                 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4012                         /* Logical block guard check failed */
4013                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4014                         atomic_set(&sdeb_inject_pending, 0);
4015                         return illegal_condition_result;
4016                 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4017                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4018                         atomic_set(&sdeb_inject_pending, 0);
4019                         return illegal_condition_result;
4020                 }
4021         }
4022         return 0;
4023 }
4024
4025 /*
4026  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4027  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4028  */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* buffer for header + LBA range descriptors */
	u8 *up;			/* cursor over the range descriptors */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {	/* WRITE SCATTERED(32) */
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	/* the descriptor list occupies the first lbdof logical blocks */
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	sdeb_write_lock(sip);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* walk the range descriptors; first one follows the header */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* Optional error injection after each descriptor's write */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
4190
4191 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4192                            u32 ei_lba, bool unmap, bool ndob)
4193 {
4194         struct scsi_device *sdp = scp->device;
4195         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4196         unsigned long long i;
4197         u64 block, lbaa;
4198         u32 lb_size = sdebug_sector_size;
4199         int ret;
4200         struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4201                                                 scp->device->hostdata, true);
4202         u8 *fs1p;
4203         u8 *fsp;
4204
4205         sdeb_write_lock(sip);
4206
4207         ret = check_device_access_params(scp, lba, num, true);
4208         if (ret) {
4209                 sdeb_write_unlock(sip);
4210                 return ret;
4211         }
4212
4213         if (unmap && scsi_debug_lbp()) {
4214                 unmap_region(sip, lba, num);
4215                 goto out;
4216         }
4217         lbaa = lba;
4218         block = do_div(lbaa, sdebug_store_sectors);
4219         /* if ndob then zero 1 logical block, else fetch 1 logical block */
4220         fsp = sip->storep;
4221         fs1p = fsp + (block * lb_size);
4222         if (ndob) {
4223                 memset(fs1p, 0, lb_size);
4224                 ret = 0;
4225         } else
4226                 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4227
4228         if (-1 == ret) {
4229                 sdeb_write_unlock(sip);
4230                 return DID_ERROR << 16;
4231         } else if (sdebug_verbose && !ndob && (ret < lb_size))
4232                 sdev_printk(KERN_INFO, scp->device,
4233                             "%s: %s: lb size=%u, IO sent=%d bytes\n",
4234                             my_name, "write same", lb_size, ret);
4235
4236         /* Copy first sector to remaining blocks */
4237         for (i = 1 ; i < num ; i++) {
4238                 lbaa = lba + i;
4239                 block = do_div(lbaa, sdebug_store_sectors);
4240                 memmove(fsp + (block * lb_size), fs1p, lb_size);
4241         }
4242         if (scsi_debug_lbp())
4243                 map_region(sip, lba, num);
4244         /* If ZBC zone then bump its write pointer */
4245         if (sdebug_dev_is_zoned(devip))
4246                 zbc_inc_wp(devip, lba, num);
4247 out:
4248         sdeb_write_unlock(sip);
4249
4250         return 0;
4251 }
4252
4253 static int resp_write_same_10(struct scsi_cmnd *scp,
4254                               struct sdebug_dev_info *devip)
4255 {
4256         u8 *cmd = scp->cmnd;
4257         u32 lba;
4258         u16 num;
4259         u32 ei_lba = 0;
4260         bool unmap = false;
4261
4262         if (cmd[1] & 0x8) {
4263                 if (sdebug_lbpws10 == 0) {
4264                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4265                         return check_condition_result;
4266                 } else
4267                         unmap = true;
4268         }
4269         lba = get_unaligned_be32(cmd + 2);
4270         num = get_unaligned_be16(cmd + 7);
4271         if (num > sdebug_write_same_length) {
4272                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4273                 return check_condition_result;
4274         }
4275         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4276 }
4277
4278 static int resp_write_same_16(struct scsi_cmnd *scp,
4279                               struct sdebug_dev_info *devip)
4280 {
4281         u8 *cmd = scp->cmnd;
4282         u64 lba;
4283         u32 num;
4284         u32 ei_lba = 0;
4285         bool unmap = false;
4286         bool ndob = false;
4287
4288         if (cmd[1] & 0x8) {     /* UNMAP */
4289                 if (sdebug_lbpws == 0) {
4290                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4291                         return check_condition_result;
4292                 } else
4293                         unmap = true;
4294         }
4295         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4296                 ndob = true;
4297         lba = get_unaligned_be64(cmd + 2);
4298         num = get_unaligned_be32(cmd + 10);
4299         if (num > sdebug_write_same_length) {
4300                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4301                 return check_condition_result;
4302         }
4303         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4304 }
4305
4306 /* Note the mode field is in the same position as the (lower) service action
4307  * field. For the Report supported operation codes command, SPC-4 suggests
4308  * each mode of this command should be reported separately; for future. */
4309 static int resp_write_buffer(struct scsi_cmnd *scp,
4310                              struct sdebug_dev_info *devip)
4311 {
4312         u8 *cmd = scp->cmnd;
4313         struct scsi_device *sdp = scp->device;
4314         struct sdebug_dev_info *dp;
4315         u8 mode;
4316
4317         mode = cmd[1] & 0x1f;
4318         switch (mode) {
4319         case 0x4:       /* download microcode (MC) and activate (ACT) */
4320                 /* set UAs on this device only */
4321                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4322                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4323                 break;
4324         case 0x5:       /* download MC, save and ACT */
4325                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4326                 break;
4327         case 0x6:       /* download MC with offsets and ACT */
4328                 /* set UAs on most devices (LUs) in this target */
4329                 list_for_each_entry(dp,
4330                                     &devip->sdbg_host->dev_info_list,
4331                                     dev_list)
4332                         if (dp->target == sdp->id) {
4333                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4334                                 if (devip != dp)
4335                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4336                                                 dp->uas_bm);
4337                         }
4338                 break;
4339         case 0x7:       /* download MC with offsets, save, and ACT */
4340                 /* set UA on all devices (LUs) in this target */
4341                 list_for_each_entry(dp,
4342                                     &devip->sdbg_host->dev_info_list,
4343                                     dev_list)
4344                         if (dp->target == sdp->id)
4345                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4346                                         dp->uas_bm);
4347                 break;
4348         default:
4349                 /* do nothing for this command for other mode values */
4350                 break;
4351         }
4352         return 0;
4353 }
4354
/*
 * COMPARE AND WRITE (SBC). The data-out buffer carries 2 * NUMBER OF
 * LOGICAL BLOCKS blocks: first the verify data, then the write data.
 * comp_write_worker() compares the first half against the store and,
 * only on a full match, writes the second half.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* WRPROTECT bits must be zero on a type 2 protected device */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;		/* verify blocks followed by write blocks */
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	/* exclusive access while comparing and (possibly) writing */
	sdeb_write_lock(sip);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* a miscompare leaves the medium unchanged */
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);	/* mark blocks provisioned */
cleanup:
	sdeb_write_unlock(sip);
	kfree(arr);
	return retval;
}
4415
/* One 16-byte UNMAP block descriptor as laid out in the parameter data */
struct unmap_block_desc {
	__be64	lba;		/* starting LBA of the range to deallocate */
	__be32	blocks;		/* number of logical blocks in the range */
	__be32	__reserved;
};
4421
4422 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4423 {
4424         unsigned char *buf;
4425         struct unmap_block_desc *desc;
4426         struct sdeb_store_info *sip = devip2sip(devip, true);
4427         unsigned int i, payload_len, descriptors;
4428         int ret;
4429
4430         if (!scsi_debug_lbp())
4431                 return 0;       /* fib and say its done */
4432         payload_len = get_unaligned_be16(scp->cmnd + 7);
4433         BUG_ON(scsi_bufflen(scp) != payload_len);
4434
4435         descriptors = (payload_len - 8) / 16;
4436         if (descriptors > sdebug_unmap_max_desc) {
4437                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4438                 return check_condition_result;
4439         }
4440
4441         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4442         if (!buf) {
4443                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4444                                 INSUFF_RES_ASCQ);
4445                 return check_condition_result;
4446         }
4447
4448         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4449
4450         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4451         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4452
4453         desc = (void *)&buf[8];
4454
4455         sdeb_write_lock(sip);
4456
4457         for (i = 0 ; i < descriptors ; i++) {
4458                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4459                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4460
4461                 ret = check_device_access_params(scp, lba, num, true);
4462                 if (ret)
4463                         goto out;
4464
4465                 unmap_region(sip, lba, num);
4466         }
4467
4468         ret = 0;
4469
4470 out:
4471         sdeb_write_unlock(sip);
4472         kfree(buf);
4473
4474         return ret;
4475 }
4476
4477 #define SDEBUG_GET_LBA_STATUS_LEN 32
4478
4479 static int resp_get_lba_status(struct scsi_cmnd *scp,
4480                                struct sdebug_dev_info *devip)
4481 {
4482         u8 *cmd = scp->cmnd;
4483         u64 lba;
4484         u32 alloc_len, mapped, num;
4485         int ret;
4486         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4487
4488         lba = get_unaligned_be64(cmd + 2);
4489         alloc_len = get_unaligned_be32(cmd + 10);
4490
4491         if (alloc_len < 24)
4492                 return 0;
4493
4494         ret = check_device_access_params(scp, lba, 1, false);
4495         if (ret)
4496                 return ret;
4497
4498         if (scsi_debug_lbp()) {
4499                 struct sdeb_store_info *sip = devip2sip(devip, true);
4500
4501                 mapped = map_state(sip, lba, &num);
4502         } else {
4503                 mapped = 1;
4504                 /* following just in case virtual_gb changed */
4505                 sdebug_capacity = get_sdebug_capacity();
4506                 if (sdebug_capacity - lba <= 0xffffffff)
4507                         num = sdebug_capacity - lba;
4508                 else
4509                         num = 0xffffffff;
4510         }
4511
4512         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4513         put_unaligned_be32(20, arr);            /* Parameter Data Length */
4514         put_unaligned_be64(lba, arr + 8);       /* LBA */
4515         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
4516         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
4517
4518         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4519 }
4520
4521 static int resp_sync_cache(struct scsi_cmnd *scp,
4522                            struct sdebug_dev_info *devip)
4523 {
4524         int res = 0;
4525         u64 lba;
4526         u32 num_blocks;
4527         u8 *cmd = scp->cmnd;
4528
4529         if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
4530                 lba = get_unaligned_be32(cmd + 2);
4531                 num_blocks = get_unaligned_be16(cmd + 7);
4532         } else {                                /* SYNCHRONIZE_CACHE(16) */
4533                 lba = get_unaligned_be64(cmd + 2);
4534                 num_blocks = get_unaligned_be32(cmd + 10);
4535         }
4536         if (lba + num_blocks > sdebug_capacity) {
4537                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4538                 return check_condition_result;
4539         }
4540         if (!write_since_sync || (cmd[1] & 0x2))
4541                 res = SDEG_RES_IMMED_MASK;
4542         else            /* delay if write_since_sync and IMMED clear */
4543                 write_since_sync = false;
4544         return res;
4545 }
4546
4547 /*
4548  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4549  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4550  * a GOOD status otherwise. Model a disk with a big cache and yield
4551  * CONDITION MET. Actually tries to bring range in main memory into the
4552  * cache associated with the CPU(s).
4553  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;	/* base of the backing store (may be NULL) */

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;	/* no backing store: nothing to prefetch */
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div() divides lba in place; block receives the remainder, i.e.
	 * the offset within the (possibly smaller, wrapping) store */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		/* range wraps past the end of the store */
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	sdeb_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		/* wrapped part starts again at the front of the store */
		prefetch_range(fsp, rest * sdebug_sector_size);
	sdeb_read_unlock(sip);
fini:
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;	/* IMMED: complete early */
	/* big cache model: always CONDITION MET ("will fit in cache") */
	return res | condition_met_result;
}
4595
4596 #define RL_BUCKET_ELEMS 8
4597
4598 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4599  * (W-LUN), the normal Linux scanning logic does not associate it with a
4600  * device (e.g. /dev/sg7). The following magic will make that association:
4601  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4602  * where <n> is a host number. If there are multiple targets in a host then
4603  * the above will associate a W-LUN to each target. To only get a W-LUN
4604  * for target 2, then use "echo '- 2 49409' > scan" .
4605  */
/*
 * REPORT LUNS. Builds the response in 64-byte buckets of 8 entries
 * (RL_BUCKET_ELEMS) that are streamed to the data-in buffer one at a
 * time; the first bucket starts with the 8-byte response header.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];	/* one bucket */
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;	/* LUN numbering starts at 1 in this mode */

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: 8-byte header occupies slot 0 */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		/* fill the remaining slots of this bucket with LUN entries */
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				/* flat addressing: set address method bits */
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* partial bucket: LUN list exhausted */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the W-LUN (lun_p points at the next free slot) */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4698
/*
 * VERIFY(10)/VERIFY(16) with BYTCHK 1 or 3: fetch the data-out buffer
 * and compare it (read-only) against the backing store. BYTCHK=0
 * (medium verification only) is always reported as successful.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		/* BYTCHK=2 is reserved */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	a_num = is_bytchk3 ? 1 : vnum;	/* blocks in the data-out buffer */
	/* Treat following check like one for read (i.e. no write) access */
	/* NOTE(review): for BYTCHK=3 only the first block is range checked
	 * (a_num == 1) while comp_write_worker() below compares vnum blocks;
	 * confirm lba + vnum cannot overrun the store. */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		/* replicate the single received block across the buffer */
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	/* last arg true: compare only, never writes to the store */
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}
4774
4775 #define RZONES_DESC_HD 64
4776
4777 /* Report zones depending on start LBA and reporting options */
4778 static int resp_report_zones(struct scsi_cmnd *scp,
4779                              struct sdebug_dev_info *devip)
4780 {
4781         unsigned int rep_max_zones, nrz = 0;
4782         int ret = 0;
4783         u32 alloc_len, rep_opts, rep_len;
4784         bool partial;
4785         u64 lba, zs_lba;
4786         u8 *arr = NULL, *desc;
4787         u8 *cmd = scp->cmnd;
4788         struct sdeb_zone_state *zsp = NULL;
4789         struct sdeb_store_info *sip = devip2sip(devip, false);
4790
4791         if (!sdebug_dev_is_zoned(devip)) {
4792                 mk_sense_invalid_opcode(scp);
4793                 return check_condition_result;
4794         }
4795         zs_lba = get_unaligned_be64(cmd + 2);
4796         alloc_len = get_unaligned_be32(cmd + 10);
4797         if (alloc_len == 0)
4798                 return 0;       /* not an error */
4799         rep_opts = cmd[14] & 0x3f;
4800         partial = cmd[14] & 0x80;
4801
4802         if (zs_lba >= sdebug_capacity) {
4803                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4804                 return check_condition_result;
4805         }
4806
4807         rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4808
4809         arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4810         if (!arr) {
4811                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4812                                 INSUFF_RES_ASCQ);
4813                 return check_condition_result;
4814         }
4815
4816         sdeb_read_lock(sip);
4817
4818         desc = arr + 64;
4819         for (lba = zs_lba; lba < sdebug_capacity;
4820              lba = zsp->z_start + zsp->z_size) {
4821                 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4822                         break;
4823                 zsp = zbc_zone(devip, lba);
4824                 switch (rep_opts) {
4825                 case 0x00:
4826                         /* All zones */
4827                         break;
4828                 case 0x01:
4829                         /* Empty zones */
4830                         if (zsp->z_cond != ZC1_EMPTY)
4831                                 continue;
4832                         break;
4833                 case 0x02:
4834                         /* Implicit open zones */
4835                         if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4836                                 continue;
4837                         break;
4838                 case 0x03:
4839                         /* Explicit open zones */
4840                         if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4841                                 continue;
4842                         break;
4843                 case 0x04:
4844                         /* Closed zones */
4845                         if (zsp->z_cond != ZC4_CLOSED)
4846                                 continue;
4847                         break;
4848                 case 0x05:
4849                         /* Full zones */
4850                         if (zsp->z_cond != ZC5_FULL)
4851                                 continue;
4852                         break;
4853                 case 0x06:
4854                 case 0x07:
4855                 case 0x10:
4856                         /*
4857                          * Read-only, offline, reset WP recommended are
4858                          * not emulated: no zones to report;
4859                          */
4860                         continue;
4861                 case 0x11:
4862                         /* non-seq-resource set */
4863                         if (!zsp->z_non_seq_resource)
4864                                 continue;
4865                         break;
4866                 case 0x3e:
4867                         /* All zones except gap zones. */
4868                         if (zbc_zone_is_gap(zsp))
4869                                 continue;
4870                         break;
4871                 case 0x3f:
4872                         /* Not write pointer (conventional) zones */
4873                         if (zbc_zone_is_seq(zsp))
4874                                 continue;
4875                         break;
4876                 default:
4877                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
4878                                         INVALID_FIELD_IN_CDB, 0);
4879                         ret = check_condition_result;
4880                         goto fini;
4881                 }
4882
4883                 if (nrz < rep_max_zones) {
4884                         /* Fill zone descriptor */
4885                         desc[0] = zsp->z_type;
4886                         desc[1] = zsp->z_cond << 4;
4887                         if (zsp->z_non_seq_resource)
4888                                 desc[1] |= 1 << 1;
4889                         put_unaligned_be64((u64)zsp->z_size, desc + 8);
4890                         put_unaligned_be64((u64)zsp->z_start, desc + 16);
4891                         put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4892                         desc += 64;
4893                 }
4894
4895                 if (partial && nrz >= rep_max_zones)
4896                         break;
4897
4898                 nrz++;
4899         }
4900
4901         /* Report header */
4902         /* Zone list length. */
4903         put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4904         /* Maximum LBA */
4905         put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4906         /* Zone starting LBA granularity. */
4907         if (devip->zcap < devip->zsize)
4908                 put_unaligned_be64(devip->zsize, arr + 16);
4909
4910         rep_len = (unsigned long)desc - (unsigned long)arr;
4911         ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4912
4913 fini:
4914         sdeb_read_unlock(sip);
4915         kfree(arr);
4916         return ret;
4917 }
4918
4919 /* Logic transplanted from tcmu-runner, file_zbc.c */
4920 static void zbc_open_all(struct sdebug_dev_info *devip)
4921 {
4922         struct sdeb_zone_state *zsp = &devip->zstate[0];
4923         unsigned int i;
4924
4925         for (i = 0; i < devip->nr_zones; i++, zsp++) {
4926                 if (zsp->z_cond == ZC4_CLOSED)
4927                         zbc_open_zone(devip, &devip->zstate[i], true);
4928         }
4929 }
4930
4931 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4932 {
4933         int res = 0;
4934         u64 z_id;
4935         enum sdebug_z_cond zc;
4936         u8 *cmd = scp->cmnd;
4937         struct sdeb_zone_state *zsp;
4938         bool all = cmd[14] & 0x01;
4939         struct sdeb_store_info *sip = devip2sip(devip, false);
4940
4941         if (!sdebug_dev_is_zoned(devip)) {
4942                 mk_sense_invalid_opcode(scp);
4943                 return check_condition_result;
4944         }
4945
4946         sdeb_write_lock(sip);
4947
4948         if (all) {
4949                 /* Check if all closed zones can be open */
4950                 if (devip->max_open &&
4951                     devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4952                         mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4953                                         INSUFF_ZONE_ASCQ);
4954                         res = check_condition_result;
4955                         goto fini;
4956                 }
4957                 /* Open all closed zones */
4958                 zbc_open_all(devip);
4959                 goto fini;
4960         }
4961
4962         /* Open the specified zone */
4963         z_id = get_unaligned_be64(cmd + 2);
4964         if (z_id >= sdebug_capacity) {
4965                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4966                 res = check_condition_result;
4967                 goto fini;
4968         }
4969
4970         zsp = zbc_zone(devip, z_id);
4971         if (z_id != zsp->z_start) {
4972                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4973                 res = check_condition_result;
4974                 goto fini;
4975         }
4976         if (zbc_zone_is_conv(zsp)) {
4977                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4978                 res = check_condition_result;
4979                 goto fini;
4980         }
4981
4982         zc = zsp->z_cond;
4983         if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4984                 goto fini;
4985
4986         if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4987                 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4988                                 INSUFF_ZONE_ASCQ);
4989                 res = check_condition_result;
4990                 goto fini;
4991         }
4992
4993         zbc_open_zone(devip, zsp, true);
4994 fini:
4995         sdeb_write_unlock(sip);
4996         return res;
4997 }
4998
4999 static void zbc_close_all(struct sdebug_dev_info *devip)
5000 {
5001         unsigned int i;
5002
5003         for (i = 0; i < devip->nr_zones; i++)
5004                 zbc_close_zone(devip, &devip->zstate[i]);
5005 }
5006
/*
 * CLOSE ZONE (ZBC): close the zone starting at the CDB's LBA, or all
 * zones when the ALL bit (byte 14 bit 0) is set.
 */
static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		/* ZBC out commands are only valid on zoned devices */
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		/* the LBA must be the first LBA of a zone */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		/* conventional zones cannot be closed */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	sdeb_write_unlock(sip);
	return res;
}
5054
/*
 * Transition a zone to the FULL condition (ZBC FINISH ZONE).
 * Open zones are closed first so the open/closed bookkeeping is kept
 * consistent. @empty selects whether EMPTY zones are finished too (used
 * when finishing a single explicitly named zone, not for "finish all").
 */
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		/* zbc_close_zone() may have just moved the zone to CLOSED */
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		/* write pointer moves to the end of the zone */
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}
5070
5071 static void zbc_finish_all(struct sdebug_dev_info *devip)
5072 {
5073         unsigned int i;
5074
5075         for (i = 0; i < devip->nr_zones; i++)
5076                 zbc_finish_zone(devip, &devip->zstate[i], false);
5077 }
5078
/*
 * FINISH ZONE (ZBC): move the zone starting at the CDB's LBA (or all
 * zones when the ALL bit is set) to the FULL condition.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		/* ZBC out commands are only valid on zoned devices */
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		/* the LBA must be the first LBA of a zone */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		/* conventional zones cannot be finished */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* empty=true: an EMPTY zone may be finished explicitly */
	zbc_finish_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
5126
/*
 * Reset the write pointer of a sequential zone: close it if open, zero the
 * written part of the backing store, and return the zone to the EMPTY
 * condition. Conventional and gap zones are left untouched.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	/* Close first so the open/closed zone accounting stays correct. */
	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	/* Discard the zone's data: clear the store from start up to the wp. */
	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
5151
5152 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5153 {
5154         unsigned int i;
5155
5156         for (i = 0; i < devip->nr_zones; i++)
5157                 zbc_rwp_zone(devip, &devip->zstate[i]);
5158 }
5159
5160 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5161 {
5162         struct sdeb_zone_state *zsp;
5163         int res = 0;
5164         u64 z_id;
5165         u8 *cmd = scp->cmnd;
5166         bool all = cmd[14] & 0x01;
5167         struct sdeb_store_info *sip = devip2sip(devip, false);
5168
5169         if (!sdebug_dev_is_zoned(devip)) {
5170                 mk_sense_invalid_opcode(scp);
5171                 return check_condition_result;
5172         }
5173
5174         sdeb_write_lock(sip);
5175
5176         if (all) {
5177                 zbc_rwp_all(devip);
5178                 goto fini;
5179         }
5180
5181         z_id = get_unaligned_be64(cmd + 2);
5182         if (z_id >= sdebug_capacity) {
5183                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5184                 res = check_condition_result;
5185                 goto fini;
5186         }
5187
5188         zsp = zbc_zone(devip, z_id);
5189         if (z_id != zsp->z_start) {
5190                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5191                 res = check_condition_result;
5192                 goto fini;
5193         }
5194         if (zbc_zone_is_conv(zsp)) {
5195                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5196                 res = check_condition_result;
5197                 goto fini;
5198         }
5199
5200         zbc_rwp_zone(devip, zsp);
5201 fini:
5202         sdeb_write_unlock(sip);
5203         return res;
5204 }
5205
5206 static u32 get_tag(struct scsi_cmnd *cmnd)
5207 {
5208         return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5209 }
5210
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
	unsigned long flags;
	struct scsi_cmnd *scp = sqcp->scmd;
	struct sdebug_scsi_cmd *sdsc;
	bool aborted;

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* completion ran on a different CPU than the submitter */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	if (!scp) {
		pr_err("scmd=NULL\n");
		goto out;
	}

	/* Detach the queued command under the per-command lock; consume the
	 * aborted flag so it fires only once. */
	sdsc = scsi_cmd_priv(scp);
	spin_lock_irqsave(&sdsc->lock, flags);
	aborted = sd_dp->aborted;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	ASSIGN_QUEUED_CMD(scp, NULL);

	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (aborted) {
		/* Hand the command to SCSI EH instead of completing it. */
		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
		blk_abort_request(scsi_cmd_to_rq(scp));
		goto out;
	}

	scsi_done(scp); /* callback to mid level */
out:
	sdebug_free_queued_cmd(sqcp);
}
5250
5251 /* When high resolution timer goes off this function is called. */
5252 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5253 {
5254         struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5255                                                   hrt);
5256         sdebug_q_cmd_complete(sd_dp);
5257         return HRTIMER_NORESTART;
5258 }
5259
5260 /* When work queue schedules work, it calls this function. */
5261 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5262 {
5263         struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5264                                                   ew.work);
5265         sdebug_q_cmd_complete(sd_dp);
5266 }
5267
/*
 * When sdebug_uuid_ctl == 2 every device shares one LU name: the UUID is
 * generated on first use (flagged by got_shared_uuid) and then copied to
 * each subsequently created device.
 */
static bool got_shared_uuid;
static uuid_t shared_uuid;
5270
5271 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5272 {
5273         struct sdeb_zone_state *zsp;
5274         sector_t capacity = get_sdebug_capacity();
5275         sector_t conv_capacity;
5276         sector_t zstart = 0;
5277         unsigned int i;
5278
5279         /*
5280          * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5281          * a zone size allowing for at least 4 zones on the device. Otherwise,
5282          * use the specified zone size checking that at least 2 zones can be
5283          * created for the device.
5284          */
5285         if (!sdeb_zbc_zone_size_mb) {
5286                 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5287                         >> ilog2(sdebug_sector_size);
5288                 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5289                         devip->zsize >>= 1;
5290                 if (devip->zsize < 2) {
5291                         pr_err("Device capacity too small\n");
5292                         return -EINVAL;
5293                 }
5294         } else {
5295                 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5296                         pr_err("Zone size is not a power of 2\n");
5297                         return -EINVAL;
5298                 }
5299                 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5300                         >> ilog2(sdebug_sector_size);
5301                 if (devip->zsize >= capacity) {
5302                         pr_err("Zone size too large for device capacity\n");
5303                         return -EINVAL;
5304                 }
5305         }
5306
5307         devip->zsize_shift = ilog2(devip->zsize);
5308         devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5309
5310         if (sdeb_zbc_zone_cap_mb == 0) {
5311                 devip->zcap = devip->zsize;
5312         } else {
5313                 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5314                               ilog2(sdebug_sector_size);
5315                 if (devip->zcap > devip->zsize) {
5316                         pr_err("Zone capacity too large\n");
5317                         return -EINVAL;
5318                 }
5319         }
5320
5321         conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5322         if (conv_capacity >= capacity) {
5323                 pr_err("Number of conventional zones too large\n");
5324                 return -EINVAL;
5325         }
5326         devip->nr_conv_zones = sdeb_zbc_nr_conv;
5327         devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5328                               devip->zsize_shift;
5329         devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5330
5331         /* Add gap zones if zone capacity is smaller than the zone size */
5332         if (devip->zcap < devip->zsize)
5333                 devip->nr_zones += devip->nr_seq_zones;
5334
5335         if (devip->zoned) {
5336                 /* zbc_max_open_zones can be 0, meaning "not reported" */
5337                 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5338                         devip->max_open = (devip->nr_zones - 1) / 2;
5339                 else
5340                         devip->max_open = sdeb_zbc_max_open;
5341         }
5342
5343         devip->zstate = kcalloc(devip->nr_zones,
5344                                 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5345         if (!devip->zstate)
5346                 return -ENOMEM;
5347
5348         for (i = 0; i < devip->nr_zones; i++) {
5349                 zsp = &devip->zstate[i];
5350
5351                 zsp->z_start = zstart;
5352
5353                 if (i < devip->nr_conv_zones) {
5354                         zsp->z_type = ZBC_ZTYPE_CNV;
5355                         zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5356                         zsp->z_wp = (sector_t)-1;
5357                         zsp->z_size =
5358                                 min_t(u64, devip->zsize, capacity - zstart);
5359                 } else if ((zstart & (devip->zsize - 1)) == 0) {
5360                         if (devip->zoned)
5361                                 zsp->z_type = ZBC_ZTYPE_SWR;
5362                         else
5363                                 zsp->z_type = ZBC_ZTYPE_SWP;
5364                         zsp->z_cond = ZC1_EMPTY;
5365                         zsp->z_wp = zsp->z_start;
5366                         zsp->z_size =
5367                                 min_t(u64, devip->zcap, capacity - zstart);
5368                 } else {
5369                         zsp->z_type = ZBC_ZTYPE_GAP;
5370                         zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5371                         zsp->z_wp = (sector_t)-1;
5372                         zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5373                                             capacity - zstart);
5374                 }
5375
5376                 WARN_ON_ONCE((int)zsp->z_size <= 0);
5377                 zstart += zsp->z_size;
5378         }
5379
5380         return 0;
5381 }
5382
5383 static struct sdebug_dev_info *sdebug_device_create(
5384                         struct sdebug_host_info *sdbg_host, gfp_t flags)
5385 {
5386         struct sdebug_dev_info *devip;
5387
5388         devip = kzalloc(sizeof(*devip), flags);
5389         if (devip) {
5390                 if (sdebug_uuid_ctl == 1)
5391                         uuid_gen(&devip->lu_name);
5392                 else if (sdebug_uuid_ctl == 2) {
5393                         if (got_shared_uuid)
5394                                 devip->lu_name = shared_uuid;
5395                         else {
5396                                 uuid_gen(&shared_uuid);
5397                                 got_shared_uuid = true;
5398                                 devip->lu_name = shared_uuid;
5399                         }
5400                 }
5401                 devip->sdbg_host = sdbg_host;
5402                 if (sdeb_zbc_in_use) {
5403                         devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5404                         if (sdebug_device_create_zones(devip)) {
5405                                 kfree(devip);
5406                                 return NULL;
5407                         }
5408                 } else {
5409                         devip->zoned = false;
5410                 }
5411                 devip->create_ts = ktime_get_boottime();
5412                 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5413                 spin_lock_init(&devip->list_lock);
5414                 INIT_LIST_HEAD(&devip->inject_err_list);
5415                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5416         }
5417         return devip;
5418 }
5419
5420 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5421 {
5422         struct sdebug_host_info *sdbg_host;
5423         struct sdebug_dev_info *open_devip = NULL;
5424         struct sdebug_dev_info *devip;
5425
5426         sdbg_host = shost_to_sdebug_host(sdev->host);
5427
5428         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5429                 if ((devip->used) && (devip->channel == sdev->channel) &&
5430                     (devip->target == sdev->id) &&
5431                     (devip->lun == sdev->lun))
5432                         return devip;
5433                 else {
5434                         if ((!devip->used) && (!open_devip))
5435                                 open_devip = devip;
5436                 }
5437         }
5438         if (!open_devip) { /* try and make a new one */
5439                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5440                 if (!open_devip) {
5441                         pr_err("out of memory at line %d\n", __LINE__);
5442                         return NULL;
5443                 }
5444         }
5445
5446         open_devip->channel = sdev->channel;
5447         open_devip->target = sdev->id;
5448         open_devip->lun = sdev->lun;
5449         open_devip->sdbg_host = sdbg_host;
5450         set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5451         open_devip->used = true;
5452         return open_devip;
5453 }
5454
5455 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5456 {
5457         if (sdebug_verbose)
5458                 pr_info("slave_alloc <%u %u %u %llu>\n",
5459                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5460
5461         return 0;
5462 }
5463
5464 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5465 {
5466         struct sdebug_dev_info *devip =
5467                         (struct sdebug_dev_info *)sdp->hostdata;
5468         struct dentry *dentry;
5469
5470         if (sdebug_verbose)
5471                 pr_info("slave_configure <%u %u %u %llu>\n",
5472                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5473         if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5474                 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5475         if (devip == NULL) {
5476                 devip = find_build_dev_info(sdp);
5477                 if (devip == NULL)
5478                         return 1;  /* no resources, will be marked offline */
5479         }
5480         sdp->hostdata = devip;
5481         if (sdebug_no_uld)
5482                 sdp->no_uld_attach = 1;
5483         config_cdb_len(sdp);
5484
5485         if (sdebug_allow_restart)
5486                 sdp->allow_restart = 1;
5487
5488         devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5489                                 sdebug_debugfs_root);
5490         if (IS_ERR_OR_NULL(devip->debugfs_entry))
5491                 pr_info("%s: failed to create debugfs directory for device %s\n",
5492                         __func__, dev_name(&sdp->sdev_gendev));
5493
5494         dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5495                                 &sdebug_error_fops);
5496         if (IS_ERR_OR_NULL(dentry))
5497                 pr_info("%s: failed to create error file for device %s\n",
5498                         __func__, dev_name(&sdp->sdev_gendev));
5499
5500         return 0;
5501 }
5502
/*
 * LLD slave_destroy hook: drop the device's error-injection rules (freed
 * after an RCU grace period), remove its debugfs dir and release the
 * sdebug_dev_info slot for reuse.
 */
static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;

	if (sdebug_verbose)
		pr_info("slave_destroy <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);

	if (!devip)
		return;

	/* Unlink rules under the list lock; readers may still hold them,
	 * so defer the actual free to call_rcu(). */
	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		list_del_rcu(&err->list);
		call_rcu(&err->rcu, sdebug_err_free);
	}
	spin_unlock(&devip->list_lock);

	debugfs_remove(devip->debugfs_entry);

	/* make this slot available for re-use */
	devip->used = false;
	sdp->hostdata = NULL;
}
5529
5530 /* Returns true if we require the queued memory to be freed by the caller. */
5531 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5532                            enum sdeb_defer_type defer_t)
5533 {
5534         if (defer_t == SDEB_DEFER_HRT) {
5535                 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5536
5537                 switch (res) {
5538                 case 0: /* Not active, it must have already run */
5539                 case -1: /* -1 It's executing the CB */
5540                         return false;
5541                 case 1: /* Was active, we've now cancelled */
5542                 default:
5543                         return true;
5544                 }
5545         } else if (defer_t == SDEB_DEFER_WQ) {
5546                 /* Cancel if pending */
5547                 if (cancel_work_sync(&sd_dp->ew.work))
5548                         return true;
5549                 /* Was not pending, so it must have run */
5550                 return false;
5551         } else if (defer_t == SDEB_DEFER_POLL) {
5552                 return true;
5553         }
5554
5555         return false;
5556 }
5557
5558
/*
 * Detach the queued command (if any) from @cmnd and stop its deferred
 * completion. Caller must hold sdsc->lock. Returns true when a queued
 * command was found, i.e. the command had not yet completed.
 */
static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
	enum sdeb_defer_type l_defer_t;
	struct sdebug_defer *sd_dp;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);

	lockdep_assert_held(&sdsc->lock);

	if (!sqcp)
		return false;
	sd_dp = &sqcp->sd_dp;
	l_defer_t = READ_ONCE(sd_dp->defer_t);
	ASSIGN_QUEUED_CMD(cmnd, NULL);

	/* Free only if we cancelled before the callback took ownership. */
	if (stop_qc_helper(sd_dp, l_defer_t))
		sdebug_free_queued_cmd(sqcp);

	return true;
}
5579
5580 /*
5581  * Called from scsi_debug_abort() only, which is for timed-out cmd.
5582  */
5583 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5584 {
5585         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5586         unsigned long flags;
5587         bool res;
5588
5589         spin_lock_irqsave(&sdsc->lock, flags);
5590         res = scsi_debug_stop_cmnd(cmnd);
5591         spin_unlock_irqrestore(&sdsc->lock, flags);
5592
5593         return res;
5594 }
5595
5596 /*
5597  * All we can do is set the cmnd as internally aborted and wait for it to
5598  * finish. We cannot call scsi_done() as normal completion path may do that.
5599  */
5600 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5601 {
5602         scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5603
5604         return true;
5605 }
5606
5607 /* Deletes (stops) timers or work queues of all queued commands */
5608 static void stop_all_queued(void)
5609 {
5610         struct sdebug_host_info *sdhp;
5611
5612         mutex_lock(&sdebug_host_list_mutex);
5613         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5614                 struct Scsi_Host *shost = sdhp->shost;
5615
5616                 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5617         }
5618         mutex_unlock(&sdebug_host_list_mutex);
5619 }
5620
5621 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5622 {
5623         struct scsi_device *sdp = cmnd->device;
5624         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5625         struct sdebug_err_inject *err;
5626         unsigned char *cmd = cmnd->cmnd;
5627         int ret = 0;
5628
5629         if (devip == NULL)
5630                 return 0;
5631
5632         rcu_read_lock();
5633         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5634                 if (err->type == ERR_ABORT_CMD_FAILED &&
5635                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
5636                         ret = !!err->cnt;
5637                         if (err->cnt < 0)
5638                                 err->cnt++;
5639
5640                         rcu_read_unlock();
5641                         return ret;
5642                 }
5643         }
5644         rcu_read_unlock();
5645
5646         return 0;
5647 }
5648
5649 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5650 {
5651         bool ok = scsi_debug_abort_cmnd(SCpnt);
5652         u8 *cmd = SCpnt->cmnd;
5653         u8 opcode = cmd[0];
5654
5655         ++num_aborts;
5656
5657         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5658                 sdev_printk(KERN_INFO, SCpnt->device,
5659                             "%s: command%s found\n", __func__,
5660                             ok ? "" : " not");
5661
5662         if (sdebug_fail_abort(SCpnt)) {
5663                 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5664                             opcode);
5665                 return FAILED;
5666         }
5667
5668         return SUCCESS;
5669 }
5670
5671 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5672 {
5673         struct scsi_device *sdp = data;
5674         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5675
5676         if (scmd->device == sdp)
5677                 scsi_debug_abort_cmnd(scmd);
5678
5679         return true;
5680 }
5681
5682 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5683 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5684 {
5685         struct Scsi_Host *shost = sdp->host;
5686
5687         blk_mq_tagset_busy_iter(&shost->tag_set,
5688                                 scsi_debug_stop_all_queued_iter, sdp);
5689 }
5690
5691 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5692 {
5693         struct scsi_device *sdp = cmnd->device;
5694         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5695         struct sdebug_err_inject *err;
5696         unsigned char *cmd = cmnd->cmnd;
5697         int ret = 0;
5698
5699         if (devip == NULL)
5700                 return 0;
5701
5702         rcu_read_lock();
5703         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5704                 if (err->type == ERR_LUN_RESET_FAILED &&
5705                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
5706                         ret = !!err->cnt;
5707                         if (err->cnt < 0)
5708                                 err->cnt++;
5709
5710                         rcu_read_unlock();
5711                         return ret;
5712                 }
5713         }
5714         rcu_read_unlock();
5715
5716         return 0;
5717 }
5718
5719 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5720 {
5721         struct scsi_device *sdp = SCpnt->device;
5722         struct sdebug_dev_info *devip = sdp->hostdata;
5723         u8 *cmd = SCpnt->cmnd;
5724         u8 opcode = cmd[0];
5725
5726         ++num_dev_resets;
5727
5728         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5729                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5730
5731         scsi_debug_stop_all_queued(sdp);
5732         if (devip)
5733                 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5734
5735         if (sdebug_fail_lun_reset(SCpnt)) {
5736                 scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5737                 return FAILED;
5738         }
5739
5740         return SUCCESS;
5741 }
5742
5743 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5744 {
5745         struct scsi_target *starget = scsi_target(cmnd->device);
5746         struct sdebug_target_info *targetip =
5747                 (struct sdebug_target_info *)starget->hostdata;
5748
5749         if (targetip)
5750                 return targetip->reset_fail;
5751
5752         return 0;
5753 }
5754
5755 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5756 {
5757         struct scsi_device *sdp = SCpnt->device;
5758         struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5759         struct sdebug_dev_info *devip;
5760         u8 *cmd = SCpnt->cmnd;
5761         u8 opcode = cmd[0];
5762         int k = 0;
5763
5764         ++num_target_resets;
5765         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5766                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5767
5768         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5769                 if (devip->target == sdp->id) {
5770                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5771                         ++k;
5772                 }
5773         }
5774
5775         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5776                 sdev_printk(KERN_INFO, sdp,
5777                             "%s: %d device(s) found in target\n", __func__, k);
5778
5779         if (sdebug_fail_target_reset(SCpnt)) {
5780                 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5781                             opcode);
5782                 return FAILED;
5783         }
5784
5785         return SUCCESS;
5786 }
5787
5788 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5789 {
5790         struct scsi_device *sdp = SCpnt->device;
5791         struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5792         struct sdebug_dev_info *devip;
5793         int k = 0;
5794
5795         ++num_bus_resets;
5796
5797         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5798                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5799
5800         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5801                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5802                 ++k;
5803         }
5804
5805         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5806                 sdev_printk(KERN_INFO, sdp,
5807                             "%s: %d device(s) found in host\n", __func__, k);
5808         return SUCCESS;
5809 }
5810
5811 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5812 {
5813         struct sdebug_host_info *sdbg_host;
5814         struct sdebug_dev_info *devip;
5815         int k = 0;
5816
5817         ++num_host_resets;
5818         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5819                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5820         mutex_lock(&sdebug_host_list_mutex);
5821         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5822                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5823                                     dev_list) {
5824                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5825                         ++k;
5826                 }
5827         }
5828         mutex_unlock(&sdebug_host_list_mutex);
5829         stop_all_queued();
5830         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5831                 sdev_printk(KERN_INFO, SCpnt->device,
5832                             "%s: %d device(s) found\n", __func__, k);
5833         return SUCCESS;
5834 }
5835
/*
 * Write an MSDOS partition table with sdebug_num_parts equal-sized Linux
 * partitions into the first sector of the ram store @ramp. Does nothing
 * when no partitions were requested or the store is smaller than 1 MiB.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/* Align each partition start on a cylinder boundary and track the
	 * smallest resulting span so every partition gets the same size. */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ending loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* CHS coordinates of the first and last sector */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5888
5889 static void block_unblock_all_queues(bool block)
5890 {
5891         struct sdebug_host_info *sdhp;
5892
5893         lockdep_assert_held(&sdebug_host_list_mutex);
5894
5895         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5896                 struct Scsi_Host *shost = sdhp->shost;
5897
5898                 if (block)
5899                         scsi_block_requests(shost);
5900                 else
5901                         scsi_unblock_requests(shost);
5902         }
5903 }
5904
5905 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5906  * commands will be processed normally before triggers occur.
5907  */
5908 static void tweak_cmnd_count(void)
5909 {
5910         int count, modulo;
5911
5912         modulo = abs(sdebug_every_nth);
5913         if (modulo < 2)
5914                 return;
5915
5916         mutex_lock(&sdebug_host_list_mutex);
5917         block_unblock_all_queues(true);
5918         count = atomic_read(&sdebug_cmnd_count);
5919         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5920         block_unblock_all_queues(false);
5921         mutex_unlock(&sdebug_host_list_mutex);
5922 }
5923
5924 static void clear_queue_stats(void)
5925 {
5926         atomic_set(&sdebug_cmnd_count, 0);
5927         atomic_set(&sdebug_completions, 0);
5928         atomic_set(&sdebug_miss_cpus, 0);
5929         atomic_set(&sdebug_a_tsf, 0);
5930 }
5931
5932 static bool inject_on_this_cmd(void)
5933 {
5934         if (sdebug_every_nth == 0)
5935                 return false;
5936         return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5937 }
5938
5939 #define INCLUSIVE_TIMING_MAX_NS 1000000         /* 1 millisecond */
5940
5941
5942 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5943 {
5944         if (sqcp)
5945                 kmem_cache_free(queued_cmd_cache, sqcp);
5946 }
5947
/*
 * Allocate and initialize a queued-command tracking structure for @scmd.
 * Both deferral mechanisms (hrtimer and work item) are prepared here;
 * schedule_resp() later picks which one to arm. Returns NULL if the slab
 * allocation fails.
 */
static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
{
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_defer *sd_dp;

	/* GFP_ATOMIC: called on the command submission path */
	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
	if (!sqcp)
		return NULL;

	sd_dp = &sqcp->sd_dp;

	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);

	sqcp->scmd = scmd;

	return sqcp;
}
5967
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 *
 * @cmnd:        command being responded to
 * @devip:       target device info; NULL forces DID_NO_CONNECT
 * @scsi_result: non-zero value imposed on the result when the resp_*()
 *               handler itself returned success
 * @pfp:         per-opcode response function, may be NULL
 * @delta_jiff:  delay in jiffies; 0 -> respond in this thread,
 *               negative -> defer via work queue
 * @ndelay:      delay in nanoseconds, used when delta_jiff is not positive
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	u64 ns_from_boot = 0;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;


	/* Optionally inject a TASK SET FULL once the queue is at full depth
	 * and every_nth commands have accumulated (SDEBUG_OPT_RARE_TSF). */
	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    __func__, num_in_q);
		}
	}

	sqcp = sdebug_alloc_queued_cmd(cmnd);
	if (!sqcp) {
		pr_err("%s no alloc\n", __func__);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	sd_dp = &sqcp->sd_dp;

	if (polled)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* handler asked for an immediate response: strip the flag
		 * and cancel any configured delay */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* call scsi_done() from this thread */
					sdebug_free_queued_cmd(sqcp);
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			/* completion will be reaped by mq_poll; record when
			 * the response is due */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
6135
6136 /* Note: The following macros create attribute files in the
6137    /sys/module/scsi_debug/parameters directory. Unfortunately this
6138    driver is unaware of a change and cannot trigger auxiliary actions
6139    as it can when the corresponding attribute in the
6140    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
6141  */
6142 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6143 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6144 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6145 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6146 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6147 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6148 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6149 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6150 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6151 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6152 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6153 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6154 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6155 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6156 module_param_string(inq_product, sdebug_inq_product_id,
6157                     sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6158 module_param_string(inq_rev, sdebug_inq_product_rev,
6159                     sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6160 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6161                     sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6162 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6163 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6164 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6165 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6166 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6167 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6168 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6169 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6170 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6171                    S_IRUGO | S_IWUSR);
6172 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6173                    S_IRUGO | S_IWUSR);
6174 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6175 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6176 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6177 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6178 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6179 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6180 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6181 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6182 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6183 module_param_named(per_host_store, sdebug_per_host_store, bool,
6184                    S_IRUGO | S_IWUSR);
6185 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6186 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6187 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6188 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6189 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6190 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6191 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6192 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6193 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6194 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6195 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6196 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6197 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6198 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6199 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6200 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6201 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6202 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6203                    S_IRUGO | S_IWUSR);
6204 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6205 module_param_named(write_same_length, sdebug_write_same_length, int,
6206                    S_IRUGO | S_IWUSR);
6207 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6208 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6209 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6210 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6211 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6212 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6213
6214 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6215 MODULE_DESCRIPTION("SCSI debug adapter driver");
6216 MODULE_LICENSE("GPL");
6217 MODULE_VERSION(SDEBUG_VERSION);
6218
6219 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6220 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6221 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6222 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6223 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6224 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6225 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6226 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6227 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6228 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6229 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6230 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6231 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6232 MODULE_PARM_DESC(host_max_queue,
6233                  "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6234 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6235 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6236                  SDEBUG_VERSION "\")");
6237 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6238 MODULE_PARM_DESC(lbprz,
6239                  "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6240 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6241 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6242 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6243 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6244 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
6245 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6246 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6247 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
6248 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6249 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6250 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6251 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6252 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
6253 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6254 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6255 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6256 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6257 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6258 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6259 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6260 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
6261 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6262 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6263 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6264 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6265 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6266 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6267 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6268 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6269 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6270 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6271 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6272 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6273 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6274 MODULE_PARM_DESC(uuid_ctl,
6275                  "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6276 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6277 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6278 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6279 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6280 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6281 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6282 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6283 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6284 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6285 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
6286
#define SDEBUG_INFO_LEN 256
/* buffer returned by scsi_debug_info(); shared by all hosts, no locking */
static char sdebug_info[SDEBUG_INFO_LEN];
6289
6290 static const char *scsi_debug_info(struct Scsi_Host *shp)
6291 {
6292         int k;
6293
6294         k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6295                       my_name, SDEBUG_VERSION, sdebug_version_date);
6296         if (k >= (SDEBUG_INFO_LEN - 1))
6297                 return sdebug_info;
6298         scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6299                   "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6300                   sdebug_dev_size_mb, sdebug_opts, submit_queues,
6301                   "statistics", (int)sdebug_statistics);
6302         return sdebug_info;
6303 }
6304
6305 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6306 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6307                                  int length)
6308 {
6309         char arr[16];
6310         int opts;
6311         int minLen = length > 15 ? 15 : length;
6312
6313         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6314                 return -EACCES;
6315         memcpy(arr, buffer, minLen);
6316         arr[minLen] = '\0';
6317         if (1 != sscanf(arr, "%d", &opts))
6318                 return -EINVAL;
6319         sdebug_opts = opts;
6320         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6321         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6322         if (sdebug_every_nth != 0)
6323                 tweak_cmnd_count();
6324         return length;
6325 }
6326
/* Iteration context for sdebug_submit_queue_iter(): records the first and
 * last busy tag observed on hardware queue @queue_num. */
struct sdebug_submit_queue_data {
	int *first;	/* in/out: caller initializes to -1 (none found) */
	int *last;	/* in/out: most recently seen busy tag */
	int queue_num;	/* hw queue number to match */
};
6332
6333 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6334 {
6335         struct sdebug_submit_queue_data *data = opaque;
6336         u32 unique_tag = blk_mq_unique_tag(rq);
6337         u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6338         u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6339         int queue_num = data->queue_num;
6340
6341         if (hwq != queue_num)
6342                 return true;
6343
6344         /* Rely on iter'ing in ascending tag order */
6345         if (*data->first == -1)
6346                 *data->first = *data->last = tag;
6347         else
6348                 *data->last = tag;
6349
6350         return true;
6351 }
6352
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* per hw queue: first and last busy tag, if any */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, "  queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		/* map each host to its backing-store index ... */
		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		/* ... then list the stores themselves, flagging unused ones */
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
6432
6433 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6434 {
6435         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6436 }
6437 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6438  * of delay is jiffies.
6439  */
6440 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6441                            size_t count)
6442 {
6443         int jdelay, res;
6444
6445         if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6446                 res = count;
6447                 if (sdebug_jdelay != jdelay) {
6448                         struct sdebug_host_info *sdhp;
6449
6450                         mutex_lock(&sdebug_host_list_mutex);
6451                         block_unblock_all_queues(true);
6452
6453                         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6454                                 struct Scsi_Host *shost = sdhp->shost;
6455
6456                                 if (scsi_host_busy(shost)) {
6457                                         res = -EBUSY;   /* queued commands */
6458                                         break;
6459                                 }
6460                         }
6461                         if (res > 0) {
6462                                 sdebug_jdelay = jdelay;
6463                                 sdebug_ndelay = 0;
6464                         }
6465                         block_unblock_all_queues(false);
6466                         mutex_unlock(&sdebug_host_list_mutex);
6467                 }
6468                 return res;
6469         }
6470         return -EINVAL;
6471 }
6472 static DRIVER_ATTR_RW(delay);
6473
6474 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6475 {
6476         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6477 }
6478 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6479 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6480 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6481                             size_t count)
6482 {
6483         int ndelay, res;
6484
6485         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6486             (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6487                 res = count;
6488                 if (sdebug_ndelay != ndelay) {
6489                         struct sdebug_host_info *sdhp;
6490
6491                         mutex_lock(&sdebug_host_list_mutex);
6492                         block_unblock_all_queues(true);
6493
6494                         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6495                                 struct Scsi_Host *shost = sdhp->shost;
6496
6497                                 if (scsi_host_busy(shost)) {
6498                                         res = -EBUSY;   /* queued commands */
6499                                         break;
6500                                 }
6501                         }
6502
6503                         if (res > 0) {
6504                                 sdebug_ndelay = ndelay;
6505                                 sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6506                                                         : DEF_JDELAY;
6507                         }
6508                         block_unblock_all_queues(false);
6509                         mutex_unlock(&sdebug_host_list_mutex);
6510                 }
6511                 return res;
6512         }
6513         return -EINVAL;
6514 }
6515 static DRIVER_ATTR_RW(ndelay);
6516
6517 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6518 {
6519         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6520 }
6521
6522 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6523                           size_t count)
6524 {
6525         int opts;
6526         char work[20];
6527
6528         if (sscanf(buf, "%10s", work) == 1) {
6529                 if (strncasecmp(work, "0x", 2) == 0) {
6530                         if (kstrtoint(work + 2, 16, &opts) == 0)
6531                                 goto opts_done;
6532                 } else {
6533                         if (kstrtoint(work, 10, &opts) == 0)
6534                                 goto opts_done;
6535                 }
6536         }
6537         return -EINVAL;
6538 opts_done:
6539         sdebug_opts = opts;
6540         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6541         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6542         tweak_cmnd_count();
6543         return count;
6544 }
6545 static DRIVER_ATTR_RW(opts);
6546
6547 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6548 {
6549         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6550 }
6551 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6552                            size_t count)
6553 {
6554         int n;
6555
6556         /* Cannot change from or to TYPE_ZBC with sysfs */
6557         if (sdebug_ptype == TYPE_ZBC)
6558                 return -EINVAL;
6559
6560         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6561                 if (n == TYPE_ZBC)
6562                         return -EINVAL;
6563                 sdebug_ptype = n;
6564                 return count;
6565         }
6566         return -EINVAL;
6567 }
6568 static DRIVER_ATTR_RW(ptype);
6569
6570 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6571 {
6572         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6573 }
6574 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6575                             size_t count)
6576 {
6577         int n;
6578
6579         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6580                 sdebug_dsense = n;
6581                 return count;
6582         }
6583         return -EINVAL;
6584 }
6585 static DRIVER_ATTR_RW(dsense);
6586
/* Show whether READs and WRITEs are being faked (no backing store). */
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/*
 * Toggle fake READ/WRITE mode. Only 0 <-> 1 transitions do real work:
 * a 1 --> 0 transition (re-)creates or re-uses a backing store and points
 * every host at it; a 0 --> 1 transition frees all stores apart from the
 * first. Writing the current value is a no-op that still returns count.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				/* no store has ever been created; make one */
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* revive the first store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			/* NOTE(review): sdebug_host_list is walked here without
			 * taking sdebug_host_list_mutex — confirm that is safe
			 * against concurrent host add/remove. */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					/* release the host's old store index */
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
6634
6635 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6636 {
6637         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6638 }
6639 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6640                               size_t count)
6641 {
6642         int n;
6643
6644         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6645                 sdebug_no_lun_0 = n;
6646                 return count;
6647         }
6648         return -EINVAL;
6649 }
6650 static DRIVER_ATTR_RW(no_lun_0);
6651
6652 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6653 {
6654         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6655 }
6656 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6657                               size_t count)
6658 {
6659         int n;
6660
6661         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6662                 sdebug_num_tgts = n;
6663                 sdebug_max_tgts_luns();
6664                 return count;
6665         }
6666         return -EINVAL;
6667 }
6668 static DRIVER_ATTR_RW(num_tgts);
6669
/* Read-only: simulated device (store) size in MiB, fixed at module load. */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
6675
6676 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6677 {
6678         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6679 }
6680
6681 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6682                                     size_t count)
6683 {
6684         bool v;
6685
6686         if (kstrtobool(buf, &v))
6687                 return -EINVAL;
6688
6689         sdebug_per_host_store = v;
6690         return count;
6691 }
6692 static DRIVER_ATTR_RW(per_host_store);
6693
/* Read-only: number of partitions pre-created on the ramdisk at load. */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
6699
6700 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6701 {
6702         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6703 }
6704 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6705                                size_t count)
6706 {
6707         int nth;
6708         char work[20];
6709
6710         if (sscanf(buf, "%10s", work) == 1) {
6711                 if (strncasecmp(work, "0x", 2) == 0) {
6712                         if (kstrtoint(work + 2, 16, &nth) == 0)
6713                                 goto every_nth_done;
6714                 } else {
6715                         if (kstrtoint(work, 10, &nth) == 0)
6716                                 goto every_nth_done;
6717                 }
6718         }
6719         return -EINVAL;
6720
6721 every_nth_done:
6722         sdebug_every_nth = nth;
6723         if (nth && !sdebug_statistics) {
6724                 pr_info("every_nth needs statistics=1, set it\n");
6725                 sdebug_statistics = true;
6726         }
6727         tweak_cmnd_count();
6728         return count;
6729 }
6730 static DRIVER_ATTR_RW(every_nth);
6731
/* Show the current LUN address method (0 = peripheral, 1 = flat). */
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
/*
 * Set the LUN address method. Only 0 (peripheral) and 1 (flat,
 * SAM_LUN_AM_FLAT) are supported. On an actual change, and when the
 * simulated SCSI level is SPC-3 or higher, the SDEBUG_UA_LUNS_CHANGED
 * unit-attention bit is set on every device of every host so initiators
 * learn that the LUN inventory changed.
 */
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk all hosts/devices under the host-list lock */
			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
6768
6769 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6770 {
6771         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6772 }
6773 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6774                               size_t count)
6775 {
6776         int n;
6777         bool changed;
6778
6779         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6780                 if (n > 256) {
6781                         pr_warn("max_luns can be no more than 256\n");
6782                         return -EINVAL;
6783                 }
6784                 changed = (sdebug_max_luns != n);
6785                 sdebug_max_luns = n;
6786                 sdebug_max_tgts_luns();
6787                 if (changed && (sdebug_scsi_level >= 5)) {      /* >= SPC-3 */
6788                         struct sdebug_host_info *sdhp;
6789                         struct sdebug_dev_info *dp;
6790
6791                         mutex_lock(&sdebug_host_list_mutex);
6792                         list_for_each_entry(sdhp, &sdebug_host_list,
6793                                             host_list) {
6794                                 list_for_each_entry(dp, &sdhp->dev_info_list,
6795                                                     dev_list) {
6796                                         set_bit(SDEBUG_UA_LUNS_CHANGED,
6797                                                 dp->uas_bm);
6798                                 }
6799                         }
6800                         mutex_unlock(&sdebug_host_list_mutex);
6801                 }
6802                 return count;
6803         }
6804         return -EINVAL;
6805 }
6806 static DRIVER_ATTR_RW(max_luns);
6807
6808 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6809 {
6810         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6811 }
6812 /* N.B. max_queue can be changed while there are queued commands. In flight
6813  * commands beyond the new max_queue will be completed. */
6814 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6815                                size_t count)
6816 {
6817         int n;
6818
6819         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6820             (n <= SDEBUG_CANQUEUE) &&
6821             (sdebug_host_max_queue == 0)) {
6822                 mutex_lock(&sdebug_host_list_mutex);
6823
6824                 /* We may only change sdebug_max_queue when we have no shosts */
6825                 if (list_empty(&sdebug_host_list))
6826                         sdebug_max_queue = n;
6827                 else
6828                         count = -EBUSY;
6829                 mutex_unlock(&sdebug_host_list_mutex);
6830                 return count;
6831         }
6832         return -EINVAL;
6833 }
6834 static DRIVER_ATTR_RW(max_queue);
6835
/* Show host_max_queue; the attribute itself is registered read-only below. */
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}
6840
6841 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6842 {
6843         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6844 }
6845
6846 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6847 {
6848         bool v;
6849
6850         if (kstrtobool(buf, &v))
6851                 return -EINVAL;
6852
6853         sdebug_no_rwlock = v;
6854         return count;
6855 }
6856 static DRIVER_ATTR_RW(no_rwlock);
6857
6858 /*
6859  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6860  * in range [0, sdebug_host_max_queue), we can't change it.
6861  */
6862 static DRIVER_ATTR_RO(host_max_queue);
6863
/* Read-only: the no_uld flag, fixed at module load. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);
6869
/* Read-only: the simulated SCSI (ANSI) level, fixed at module load. */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
6875
/* Show the virtual (wrap-around) capacity in GiB; 0 means disabled. */
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/*
 * Set virtual_gb and recompute the reported capacity. On an actual
 * change, the SDEBUG_UA_CAPACITY_CHANGED unit-attention bit is set on
 * every device of every host.
 */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	/* NOTE(review): checkpatch prefers -EOPNOTSUPP over the kernel-
	 * internal -ENOTSUPP here; changing it would alter the errno seen
	 * by userspace, so confirm before switching. */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk all hosts/devices under the host-list lock */
			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
6914
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * Add (positive value) or remove (negative value) that many simulated
 * hosts. When per-host stores are enabled and real stores are in use,
 * an existing store marked SDEB_XA_NOT_IN_USE is re-used for a new host
 * before a fresh one is created.
 * NOTE(review): the return values of sdebug_add_host_helper(),
 * sdebug_do_add_host() and sdebug_do_remove_host() are ignored, so a
 * partial failure still returns count — confirm this is intended.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* look for a store that can be re-used */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6958
6959 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6960 {
6961         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6962 }
6963 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6964                                     size_t count)
6965 {
6966         int n;
6967
6968         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6969                 sdebug_vpd_use_hostno = n;
6970                 return count;
6971         }
6972         return -EINVAL;
6973 }
6974 static DRIVER_ATTR_RW(vpd_use_hostno);
6975
6976 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6977 {
6978         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6979 }
6980 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6981                                 size_t count)
6982 {
6983         int n;
6984
6985         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6986                 if (n > 0)
6987                         sdebug_statistics = true;
6988                 else {
6989                         clear_queue_stats();
6990                         sdebug_statistics = false;
6991                 }
6992                 return count;
6993         }
6994         return -EINVAL;
6995 }
6996 static DRIVER_ATTR_RW(statistics);
6997
/* Read-only: logical block size in bytes, fixed at module load. */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

/* Read-only: number of submission queues, fixed at module load. */
static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

/* Read-only: DIX (host-side protection information) setting. */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

/* Read-only: DIF (T10 protection information type) setting. */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

/* Read-only: guard tag type (0 or 1), fixed at module load. */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

/* Read-only: application tag ownership (ato) setting. */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
7033
/*
 * Show the provisioning map of store 0 as a printable bit list (%pbl,
 * e.g. "0-3,64-67"). When logical block provisioning is off, the whole
 * sector range is reported as one span instead.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			/* PAGE_SIZE - 1 reserves room for the '\n' added below;
			 * the NUL then fits because scnprintf's result is
			 * always < its size argument */
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
7055
7056 static ssize_t random_show(struct device_driver *ddp, char *buf)
7057 {
7058         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7059 }
7060
7061 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7062                             size_t count)
7063 {
7064         bool v;
7065
7066         if (kstrtobool(buf, &v))
7067                 return -EINVAL;
7068
7069         sdebug_random = v;
7070         return count;
7071 }
7072 static DRIVER_ATTR_RW(random);
7073
7074 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7075 {
7076         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7077 }
7078 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7079                                size_t count)
7080 {
7081         int n;
7082
7083         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7084                 sdebug_removable = (n > 0);
7085                 return count;
7086         }
7087         return -EINVAL;
7088 }
7089 static DRIVER_ATTR_RW(removable);
7090
7091 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7092 {
7093         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7094 }
7095 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7096 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7097                                size_t count)
7098 {
7099         int n;
7100
7101         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7102                 sdebug_host_lock = (n > 0);
7103                 return count;
7104         }
7105         return -EINVAL;
7106 }
7107 static DRIVER_ATTR_RW(host_lock);
7108
7109 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7110 {
7111         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7112 }
7113 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7114                             size_t count)
7115 {
7116         int n;
7117
7118         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7119                 sdebug_strict = (n > 0);
7120                 return count;
7121         }
7122         return -EINVAL;
7123 }
7124 static DRIVER_ATTR_RW(strict);
7125
/* Read-only: the uuid_ctl flag (0 or 1), fixed at module load. */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
7131
7132 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7133 {
7134         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7135 }
7136 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7137                              size_t count)
7138 {
7139         int ret, n;
7140
7141         ret = kstrtoint(buf, 0, &n);
7142         if (ret)
7143                 return ret;
7144         sdebug_cdb_len = n;
7145         all_config_cdb_len();
7146         return count;
7147 }
7148 static DRIVER_ATTR_RW(cdb_len);
7149
/* Three accepted spellings per zoned model, tried in order a, b then c
 * by sdeb_zbc_model_str(); table _a also provides the display names. */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
7167
7168 static int sdeb_zbc_model_str(const char *cp)
7169 {
7170         int res = sysfs_match_string(zbc_model_strs_a, cp);
7171
7172         if (res < 0) {
7173                 res = sysfs_match_string(zbc_model_strs_b, cp);
7174                 if (res < 0) {
7175                         res = sysfs_match_string(zbc_model_strs_c, cp);
7176                         if (res < 0)
7177                                 return -EINVAL;
7178                 }
7179         }
7180         return res;
7181 }
7182
/* Read-only: the zoned model as its long-form name from table _a. */
static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

/* Read-only: milliseconds until TEST UNIT READY reports ready. */
static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
7195
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,	/* sentinel for the attribute-group iterator */
};
ATTRIBUTE_GROUPS(sdebug_drv);

/* parent device for the pseudo hosts; created in scsi_debug_init() */
static struct device *pseudo_primary;
7247
7248 static int __init scsi_debug_init(void)
7249 {
7250         bool want_store = (sdebug_fake_rw == 0);
7251         unsigned long sz;
7252         int k, ret, hosts_to_add;
7253         int idx = -1;
7254
7255         if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7256                 pr_warn("ndelay must be less than 1 second, ignored\n");
7257                 sdebug_ndelay = 0;
7258         } else if (sdebug_ndelay > 0)
7259                 sdebug_jdelay = JDELAY_OVERRIDDEN;
7260
7261         switch (sdebug_sector_size) {
7262         case  512:
7263         case 1024:
7264         case 2048:
7265         case 4096:
7266                 break;
7267         default:
7268                 pr_err("invalid sector_size %d\n", sdebug_sector_size);
7269                 return -EINVAL;
7270         }
7271
7272         switch (sdebug_dif) {
7273         case T10_PI_TYPE0_PROTECTION:
7274                 break;
7275         case T10_PI_TYPE1_PROTECTION:
7276         case T10_PI_TYPE2_PROTECTION:
7277         case T10_PI_TYPE3_PROTECTION:
7278                 have_dif_prot = true;
7279                 break;
7280
7281         default:
7282                 pr_err("dif must be 0, 1, 2 or 3\n");
7283                 return -EINVAL;
7284         }
7285
7286         if (sdebug_num_tgts < 0) {
7287                 pr_err("num_tgts must be >= 0\n");
7288                 return -EINVAL;
7289         }
7290
7291         if (sdebug_guard > 1) {
7292                 pr_err("guard must be 0 or 1\n");
7293                 return -EINVAL;
7294         }
7295
7296         if (sdebug_ato > 1) {
7297                 pr_err("ato must be 0 or 1\n");
7298                 return -EINVAL;
7299         }
7300
7301         if (sdebug_physblk_exp > 15) {
7302                 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7303                 return -EINVAL;
7304         }
7305
7306         sdebug_lun_am = sdebug_lun_am_i;
7307         if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7308                 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7309                 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7310         }
7311
7312         if (sdebug_max_luns > 256) {
7313                 if (sdebug_max_luns > 16384) {
7314                         pr_warn("max_luns can be no more than 16384, use default\n");
7315                         sdebug_max_luns = DEF_MAX_LUNS;
7316                 }
7317                 sdebug_lun_am = SAM_LUN_AM_FLAT;
7318         }
7319
7320         if (sdebug_lowest_aligned > 0x3fff) {
7321                 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7322                 return -EINVAL;
7323         }
7324
7325         if (submit_queues < 1) {
7326                 pr_err("submit_queues must be 1 or more\n");
7327                 return -EINVAL;
7328         }
7329
7330         if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7331                 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7332                 return -EINVAL;
7333         }
7334
7335         if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7336             (sdebug_host_max_queue < 0)) {
7337                 pr_err("host_max_queue must be in range [0 %d]\n",
7338                        SDEBUG_CANQUEUE);
7339                 return -EINVAL;
7340         }
7341
7342         if (sdebug_host_max_queue &&
7343             (sdebug_max_queue != sdebug_host_max_queue)) {
7344                 sdebug_max_queue = sdebug_host_max_queue;
7345                 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7346                         sdebug_max_queue);
7347         }
7348
7349         /*
7350          * check for host managed zoned block device specified with
7351          * ptype=0x14 or zbc=XXX.
7352          */
7353         if (sdebug_ptype == TYPE_ZBC) {
7354                 sdeb_zbc_model = BLK_ZONED_HM;
7355         } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7356                 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7357                 if (k < 0)
7358                         return k;
7359                 sdeb_zbc_model = k;
7360                 switch (sdeb_zbc_model) {
7361                 case BLK_ZONED_NONE:
7362                 case BLK_ZONED_HA:
7363                         sdebug_ptype = TYPE_DISK;
7364                         break;
7365                 case BLK_ZONED_HM:
7366                         sdebug_ptype = TYPE_ZBC;
7367                         break;
7368                 default:
7369                         pr_err("Invalid ZBC model\n");
7370                         return -EINVAL;
7371                 }
7372         }
7373         if (sdeb_zbc_model != BLK_ZONED_NONE) {
7374                 sdeb_zbc_in_use = true;
7375                 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7376                         sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7377         }
7378
7379         if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7380                 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7381         if (sdebug_dev_size_mb < 1)
7382                 sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7383         sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7384         sdebug_store_sectors = sz / sdebug_sector_size;
7385         sdebug_capacity = get_sdebug_capacity();
7386
7387         /* play around with geometry, don't waste too much on track 0 */
7388         sdebug_heads = 8;
7389         sdebug_sectors_per = 32;
7390         if (sdebug_dev_size_mb >= 256)
7391                 sdebug_heads = 64;
7392         else if (sdebug_dev_size_mb >= 16)
7393                 sdebug_heads = 32;
7394         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7395                                (sdebug_sectors_per * sdebug_heads);
7396         if (sdebug_cylinders_per >= 1024) {
7397                 /* other LLDs do this; implies >= 1GB ram disk ... */
7398                 sdebug_heads = 255;
7399                 sdebug_sectors_per = 63;
7400                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7401                                (sdebug_sectors_per * sdebug_heads);
7402         }
7403         if (scsi_debug_lbp()) {
7404                 sdebug_unmap_max_blocks =
7405                         clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7406
7407                 sdebug_unmap_max_desc =
7408                         clamp(sdebug_unmap_max_desc, 0U, 256U);
7409
7410                 sdebug_unmap_granularity =
7411                         clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7412
7413                 if (sdebug_unmap_alignment &&
7414                     sdebug_unmap_granularity <=
7415                     sdebug_unmap_alignment) {
7416                         pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7417                         return -EINVAL;
7418                 }
7419         }
7420         xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7421         if (want_store) {
7422                 idx = sdebug_add_store();
7423                 if (idx < 0)
7424                         return idx;
7425         }
7426
7427         pseudo_primary = root_device_register("pseudo_0");
7428         if (IS_ERR(pseudo_primary)) {
7429                 pr_warn("root_device_register() error\n");
7430                 ret = PTR_ERR(pseudo_primary);
7431                 goto free_vm;
7432         }
7433         ret = bus_register(&pseudo_lld_bus);
7434         if (ret < 0) {
7435                 pr_warn("bus_register error: %d\n", ret);
7436                 goto dev_unreg;
7437         }
7438         ret = driver_register(&sdebug_driverfs_driver);
7439         if (ret < 0) {
7440                 pr_warn("driver_register error: %d\n", ret);
7441                 goto bus_unreg;
7442         }
7443
7444         hosts_to_add = sdebug_add_host;
7445         sdebug_add_host = 0;
7446
7447         queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7448         if (!queued_cmd_cache) {
7449                 ret = -ENOMEM;
7450                 goto driver_unreg;
7451         }
7452
7453         sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7454         if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7455                 pr_info("%s: failed to create initial debugfs directory\n", __func__);
7456
7457         for (k = 0; k < hosts_to_add; k++) {
7458                 if (want_store && k == 0) {
7459                         ret = sdebug_add_host_helper(idx);
7460                         if (ret < 0) {
7461                                 pr_err("add_host_helper k=%d, error=%d\n",
7462                                        k, -ret);
7463                                 break;
7464                         }
7465                 } else {
7466                         ret = sdebug_do_add_host(want_store &&
7467                                                  sdebug_per_host_store);
7468                         if (ret < 0) {
7469                                 pr_err("add_host k=%d error=%d\n", k, -ret);
7470                                 break;
7471                         }
7472                 }
7473         }
7474         if (sdebug_verbose)
7475                 pr_info("built %d host(s)\n", sdebug_num_hosts);
7476
7477         return 0;
7478
7479 driver_unreg:
7480         driver_unregister(&sdebug_driverfs_driver);
7481 bus_unreg:
7482         bus_unregister(&pseudo_lld_bus);
7483 dev_unreg:
7484         root_device_unregister(pseudo_primary);
7485 free_vm:
7486         sdebug_erase_store(idx, NULL);
7487         return ret;
7488 }
7489
7490 static void __exit scsi_debug_exit(void)
7491 {
7492         int k = sdebug_num_hosts;
7493
7494         for (; k; k--)
7495                 sdebug_do_remove_host(true);
7496         kmem_cache_destroy(queued_cmd_cache);
7497         driver_unregister(&sdebug_driverfs_driver);
7498         bus_unregister(&pseudo_lld_bus);
7499         root_device_unregister(pseudo_primary);
7500
7501         sdebug_erase_all_stores(false);
7502         xa_destroy(per_store_ap);
7503         debugfs_remove(sdebug_debugfs_root);
7504 }
7505
/*
 * Module entry/exit points.  NOTE(review): device_initcall() rather than
 * module_init() -- presumably for init ordering when built in; confirm.
 */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
7508
/* ->release callback for the adapter pseudo device: free the host struct. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(dev_to_sdebug_host(dev));
}
7516
7517 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7518 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7519 {
7520         if (idx < 0)
7521                 return;
7522         if (!sip) {
7523                 if (xa_empty(per_store_ap))
7524                         return;
7525                 sip = xa_load(per_store_ap, idx);
7526                 if (!sip)
7527                         return;
7528         }
7529         vfree(sip->map_storep);
7530         vfree(sip->dif_storep);
7531         vfree(sip->storep);
7532         xa_erase(per_store_ap, idx);
7533         kfree(sip);
7534 }
7535
7536 /* Assume apart_from_first==false only in shutdown case. */
7537 static void sdebug_erase_all_stores(bool apart_from_first)
7538 {
7539         unsigned long idx;
7540         struct sdeb_store_info *sip = NULL;
7541
7542         xa_for_each(per_store_ap, idx, sip) {
7543                 if (apart_from_first)
7544                         apart_from_first = false;
7545                 else
7546                         sdebug_erase_store(idx, sip);
7547         }
7548         if (apart_from_first)
7549                 sdeb_most_recent_idx = sdeb_first_idx;
7550 }
7551
7552 /*
7553  * Returns store xarray new element index (idx) if >=0 else negated errno.
7554  * Limit the number of stores to 65536.
7555  */
7556 static int sdebug_add_store(void)
7557 {
7558         int res;
7559         u32 n_idx;
7560         unsigned long iflags;
7561         unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7562         struct sdeb_store_info *sip = NULL;
7563         struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7564
7565         sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7566         if (!sip)
7567                 return -ENOMEM;
7568
7569         xa_lock_irqsave(per_store_ap, iflags);
7570         res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7571         if (unlikely(res < 0)) {
7572                 xa_unlock_irqrestore(per_store_ap, iflags);
7573                 kfree(sip);
7574                 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7575                 return res;
7576         }
7577         sdeb_most_recent_idx = n_idx;
7578         if (sdeb_first_idx < 0)
7579                 sdeb_first_idx = n_idx;
7580         xa_unlock_irqrestore(per_store_ap, iflags);
7581
7582         res = -ENOMEM;
7583         sip->storep = vzalloc(sz);
7584         if (!sip->storep) {
7585                 pr_err("user data oom\n");
7586                 goto err;
7587         }
7588         if (sdebug_num_parts > 0)
7589                 sdebug_build_parts(sip->storep, sz);
7590
7591         /* DIF/DIX: what T10 calls Protection Information (PI) */
7592         if (sdebug_dix) {
7593                 int dif_size;
7594
7595                 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7596                 sip->dif_storep = vmalloc(dif_size);
7597
7598                 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7599                         sip->dif_storep);
7600
7601                 if (!sip->dif_storep) {
7602                         pr_err("DIX oom\n");
7603                         goto err;
7604                 }
7605                 memset(sip->dif_storep, 0xff, dif_size);
7606         }
7607         /* Logical Block Provisioning */
7608         if (scsi_debug_lbp()) {
7609                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7610                 sip->map_storep = vmalloc(array_size(sizeof(long),
7611                                                      BITS_TO_LONGS(map_size)));
7612
7613                 pr_info("%lu provisioning blocks\n", map_size);
7614
7615                 if (!sip->map_storep) {
7616                         pr_err("LBP map oom\n");
7617                         goto err;
7618                 }
7619
7620                 bitmap_zero(sip->map_storep, map_size);
7621
7622                 /* Map first 1KB for partition table */
7623                 if (sdebug_num_parts)
7624                         map_region(sip, 0, 2);
7625         }
7626
7627         rwlock_init(&sip->macc_lck);
7628         return (int)n_idx;
7629 err:
7630         sdebug_erase_store((int)n_idx, sip);
7631         pr_warn("%s: failed, errno=%d\n", __func__, -res);
7632         return res;
7633 }
7634
/*
 * Allocate a sdebug_host_info, bind it to backing store index @per_host_idx
 * (or the first store when negative), create the configured number of fake
 * devices and register the adapter device on the pseudo bus.
 * Returns 0 on success, else a negated errno.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	/* negative index means "use the first store" */
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/*
	 * Once ->release has been set (i.e. device_register() was reached),
	 * drop the reference with put_device() so ->release frees the host;
	 * before that point the plain kfree() is correct.
	 */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
7693
7694 static int sdebug_do_add_host(bool mk_new_store)
7695 {
7696         int ph_idx = sdeb_most_recent_idx;
7697
7698         if (mk_new_store) {
7699                 ph_idx = sdebug_add_store();
7700                 if (ph_idx < 0)
7701                         return ph_idx;
7702         }
7703         return sdebug_add_host_helper(ph_idx);
7704 }
7705
/*
 * Remove the most recently added host.  When @the_end is false, and no
 * other host shares the host's store index, the store is marked
 * SDEB_XA_NOT_IN_USE so it can be reclaimed later; on final teardown
 * (@the_end true) the stores are erased separately.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		/* pick the last (most recently added) host on the list */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* is this host the only user of store @idx? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	/* drops the device reference; ->release frees the host struct */
	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7745
7746 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7747 {
7748         struct sdebug_dev_info *devip = sdev->hostdata;
7749
7750         if (!devip)
7751                 return  -ENODEV;
7752
7753         mutex_lock(&sdebug_host_list_mutex);
7754         block_unblock_all_queues(true);
7755
7756         if (qdepth > SDEBUG_CANQUEUE) {
7757                 qdepth = SDEBUG_CANQUEUE;
7758                 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7759                         qdepth, SDEBUG_CANQUEUE);
7760         }
7761         if (qdepth < 1)
7762                 qdepth = 1;
7763         if (qdepth != sdev->queue_depth)
7764                 scsi_change_queue_depth(sdev, qdepth);
7765
7766         block_unblock_all_queues(false);
7767         mutex_unlock(&sdebug_host_list_mutex);
7768
7769         if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7770                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7771
7772         return sdev->queue_depth;
7773 }
7774
7775 static bool fake_timeout(struct scsi_cmnd *scp)
7776 {
7777         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7778                 if (sdebug_every_nth < -1)
7779                         sdebug_every_nth = -1;
7780                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7781                         return true; /* ignore command causing timeout */
7782                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7783                          scsi_medium_access_command(scp))
7784                         return true; /* time out reads and writes */
7785         }
7786         return false;
7787 }
7788
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {	/* transient "becoming ready" state */
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		/* ASCQ 0x1: "in process of becoming ready" */
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* compute remaining time until ready (clamped) */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			/* report milliseconds-to-ready in the sense INFORMATION field */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* ASCQ 0x2: "initializing command required" */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
7831
7832 static void sdebug_map_queues(struct Scsi_Host *shost)
7833 {
7834         int i, qoff;
7835
7836         if (shost->nr_hw_queues == 1)
7837                 return;
7838
7839         for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7840                 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7841
7842                 map->nr_queues  = 0;
7843
7844                 if (i == HCTX_TYPE_DEFAULT)
7845                         map->nr_queues = submit_queues - poll_queues;
7846                 else if (i == HCTX_TYPE_POLL)
7847                         map->nr_queues = poll_queues;
7848
7849                 if (!map->nr_queues) {
7850                         BUG_ON(i == HCTX_TYPE_DEFAULT);
7851                         continue;
7852                 }
7853
7854                 map->queue_offset = qoff;
7855                 blk_mq_map_queues(map);
7856
7857                 qoff += map->nr_queues;
7858         }
7859 }
7860
/* Per-invocation state for sdebug_blk_mq_poll()'s tag iterator. */
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;	/* hardware queue being polled */
	int *num_entries;	/* output: completions counted so far */
};
7865
7866 /*
7867  * We don't handle aborted commands here, but it does not seem possible to have
7868  * aborted polled commands from schedule_resp()
7869  */
7870 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7871 {
7872         struct sdebug_blk_mq_poll_data *data = opaque;
7873         struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7874         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7875         struct sdebug_defer *sd_dp;
7876         u32 unique_tag = blk_mq_unique_tag(rq);
7877         u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7878         struct sdebug_queued_cmd *sqcp;
7879         unsigned long flags;
7880         int queue_num = data->queue_num;
7881         ktime_t time;
7882
7883         /* We're only interested in one queue for this iteration */
7884         if (hwq != queue_num)
7885                 return true;
7886
7887         /* Subsequent checks would fail if this failed, but check anyway */
7888         if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7889                 return true;
7890
7891         time = ktime_get_boottime();
7892
7893         spin_lock_irqsave(&sdsc->lock, flags);
7894         sqcp = TO_QUEUED_CMD(cmd);
7895         if (!sqcp) {
7896                 spin_unlock_irqrestore(&sdsc->lock, flags);
7897                 return true;
7898         }
7899
7900         sd_dp = &sqcp->sd_dp;
7901         if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7902                 spin_unlock_irqrestore(&sdsc->lock, flags);
7903                 return true;
7904         }
7905
7906         if (time < sd_dp->cmpl_ts) {
7907                 spin_unlock_irqrestore(&sdsc->lock, flags);
7908                 return true;
7909         }
7910
7911         ASSIGN_QUEUED_CMD(cmd, NULL);
7912         spin_unlock_irqrestore(&sdsc->lock, flags);
7913
7914         if (sdebug_statistics) {
7915                 atomic_inc(&sdebug_completions);
7916                 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7917                         atomic_inc(&sdebug_miss_cpus);
7918         }
7919
7920         sdebug_free_queued_cmd(sqcp);
7921
7922         scsi_done(cmd); /* callback to mid level */
7923         (*data->num_entries)++;
7924         return true;
7925 }
7926
7927 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7928 {
7929         int num_entries = 0;
7930         struct sdebug_blk_mq_poll_data data = {
7931                 .queue_num = queue_num,
7932                 .num_entries = &num_entries,
7933         };
7934
7935         blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7936                                 &data);
7937
7938         if (num_entries > 0)
7939                 atomic_add(num_entries, &sdeb_mq_poll_count);
7940         return num_entries;
7941 }
7942
7943 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
7944 {
7945         struct scsi_device *sdp = cmnd->device;
7946         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7947         struct sdebug_err_inject *err;
7948         unsigned char *cmd = cmnd->cmnd;
7949         int ret = 0;
7950
7951         if (devip == NULL)
7952                 return 0;
7953
7954         rcu_read_lock();
7955         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7956                 if (err->type == ERR_TMOUT_CMD &&
7957                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
7958                         ret = !!err->cnt;
7959                         if (err->cnt < 0)
7960                                 err->cnt++;
7961
7962                         rcu_read_unlock();
7963                         return ret;
7964                 }
7965         }
7966         rcu_read_unlock();
7967
7968         return 0;
7969 }
7970
7971 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
7972 {
7973         struct scsi_device *sdp = cmnd->device;
7974         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7975         struct sdebug_err_inject *err;
7976         unsigned char *cmd = cmnd->cmnd;
7977         int ret = 0;
7978
7979         if (devip == NULL)
7980                 return 0;
7981
7982         rcu_read_lock();
7983         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7984                 if (err->type == ERR_FAIL_QUEUE_CMD &&
7985                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
7986                         ret = err->cnt ? err->queuecmd_ret : 0;
7987                         if (err->cnt < 0)
7988                                 err->cnt++;
7989
7990                         rcu_read_unlock();
7991                         return ret;
7992                 }
7993         }
7994         rcu_read_unlock();
7995
7996         return 0;
7997 }
7998
7999 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
8000                            struct sdebug_err_inject *info)
8001 {
8002         struct scsi_device *sdp = cmnd->device;
8003         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8004         struct sdebug_err_inject *err;
8005         unsigned char *cmd = cmnd->cmnd;
8006         int ret = 0;
8007         int result;
8008
8009         if (devip == NULL)
8010                 return 0;
8011
8012         rcu_read_lock();
8013         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8014                 if (err->type == ERR_FAIL_CMD &&
8015                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
8016                         if (!err->cnt) {
8017                                 rcu_read_unlock();
8018                                 return 0;
8019                         }
8020
8021                         ret = !!err->cnt;
8022                         rcu_read_unlock();
8023                         goto out_handle;
8024                 }
8025         }
8026         rcu_read_unlock();
8027
8028         return 0;
8029
8030 out_handle:
8031         if (err->cnt < 0)
8032                 err->cnt++;
8033         mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8034         result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8035         *info = *err;
8036         *retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8037
8038         return ret;
8039 }
8040
8041 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8042                                    struct scsi_cmnd *scp)
8043 {
8044         u8 sdeb_i;
8045         struct scsi_device *sdp = scp->device;
8046         const struct opcode_info_t *oip;
8047         const struct opcode_info_t *r_oip;
8048         struct sdebug_dev_info *devip;
8049         u8 *cmd = scp->cmnd;
8050         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8051         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8052         int k, na;
8053         int errsts = 0;
8054         u64 lun_index = sdp->lun & 0x3FFF;
8055         u32 flags;
8056         u16 sa;
8057         u8 opcode = cmd[0];
8058         bool has_wlun_rl;
8059         bool inject_now;
8060         int ret = 0;
8061         struct sdebug_err_inject err;
8062
8063         scsi_set_resid(scp, 0);
8064         if (sdebug_statistics) {
8065                 atomic_inc(&sdebug_cmnd_count);
8066                 inject_now = inject_on_this_cmd();
8067         } else {
8068                 inject_now = false;
8069         }
8070         if (unlikely(sdebug_verbose &&
8071                      !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8072                 char b[120];
8073                 int n, len, sb;
8074
8075                 len = scp->cmd_len;
8076                 sb = (int)sizeof(b);
8077                 if (len > 32)
8078                         strcpy(b, "too long, over 32 bytes");
8079                 else {
8080                         for (k = 0, n = 0; k < len && n < sb; ++k)
8081                                 n += scnprintf(b + n, sb - n, "%02x ",
8082                                                (u32)cmd[k]);
8083                 }
8084                 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8085                             blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8086         }
8087         if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8088                 return SCSI_MLQUEUE_HOST_BUSY;
8089         has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8090         if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8091                 goto err_out;
8092
8093         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
8094         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
8095         devip = (struct sdebug_dev_info *)sdp->hostdata;
8096         if (unlikely(!devip)) {
8097                 devip = find_build_dev_info(sdp);
8098                 if (NULL == devip)
8099                         goto err_out;
8100         }
8101
8102         if (sdebug_timeout_cmd(scp)) {
8103                 scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8104                 return 0;
8105         }
8106
8107         ret = sdebug_fail_queue_cmd(scp);
8108         if (ret) {
8109                 scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8110                                 opcode, ret);
8111                 return ret;
8112         }
8113
8114         if (sdebug_fail_cmd(scp, &ret, &err)) {
8115                 scmd_printk(KERN_INFO, scp,
8116                         "fail command 0x%x with hostbyte=0x%x, "
8117                         "driverbyte=0x%x, statusbyte=0x%x, "
8118                         "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8119                         opcode, err.host_byte, err.driver_byte,
8120                         err.status_byte, err.sense_key, err.asc, err.asq);
8121                 return ret;
8122         }
8123
8124         if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8125                 atomic_set(&sdeb_inject_pending, 1);
8126
8127         na = oip->num_attached;
8128         r_pfp = oip->pfp;
8129         if (na) {       /* multiple commands with this opcode */
8130                 r_oip = oip;
8131                 if (FF_SA & r_oip->flags) {
8132                         if (F_SA_LOW & oip->flags)
8133                                 sa = 0x1f & cmd[1];
8134                         else
8135                                 sa = get_unaligned_be16(cmd + 8);
8136                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8137                                 if (opcode == oip->opcode && sa == oip->sa)
8138                                         break;
8139                         }
8140                 } else {   /* since no service action only check opcode */
8141                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8142                                 if (opcode == oip->opcode)
8143                                         break;
8144                         }
8145                 }
8146                 if (k > na) {
8147                         if (F_SA_LOW & r_oip->flags)
8148                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8149                         else if (F_SA_HIGH & r_oip->flags)
8150                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8151                         else
8152                                 mk_sense_invalid_opcode(scp);
8153                         goto check_cond;
8154                 }
8155         }       /* else (when na==0) we assume the oip is a match */
8156         flags = oip->flags;
8157         if (unlikely(F_INV_OP & flags)) {
8158                 mk_sense_invalid_opcode(scp);
8159                 goto check_cond;
8160         }
8161         if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8162                 if (sdebug_verbose)
8163                         sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
8164                                     my_name, opcode, " supported for wlun");
8165                 mk_sense_invalid_opcode(scp);
8166                 goto check_cond;
8167         }
8168         if (unlikely(sdebug_strict)) {  /* check cdb against mask */
8169                 u8 rem;
8170                 int j;
8171
8172                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8173                         rem = ~oip->len_mask[k] & cmd[k];
8174                         if (rem) {
8175                                 for (j = 7; j >= 0; --j, rem <<= 1) {
8176                                         if (0x80 & rem)
8177                                                 break;
8178                                 }
8179                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8180                                 goto check_cond;
8181                         }
8182                 }
8183         }
8184         if (unlikely(!(F_SKIP_UA & flags) &&
8185                      find_first_bit(devip->uas_bm,
8186                                     SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8187                 errsts = make_ua(scp, devip);
8188                 if (errsts)
8189                         goto check_cond;
8190         }
8191         if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8192                      atomic_read(&devip->stopped))) {
8193                 errsts = resp_not_ready(scp, devip);
8194                 if (errsts)
8195                         goto fini;
8196         }
8197         if (sdebug_fake_rw && (F_FAKE_RW & flags))
8198                 goto fini;
8199         if (unlikely(sdebug_every_nth)) {
8200                 if (fake_timeout(scp))
8201                         return 0;       /* ignore command: make trouble */
8202         }
8203         if (likely(oip->pfp))
8204                 pfp = oip->pfp; /* calls a resp_* function */
8205         else
8206                 pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
8207
8208 fini:
8209         if (F_DELAY_OVERR & flags)      /* cmds like INQUIRY respond asap */
8210                 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8211         else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8212                                             sdebug_ndelay > 10000)) {
8213                 /*
8214                  * Skip long delays if ndelay <= 10 microseconds. Otherwise
8215                  * for Start Stop Unit (SSU) want at least 1 second delay and
8216                  * if sdebug_jdelay>1 want a long delay of that many seconds.
8217                  * For Synchronize Cache want 1/20 of SSU's delay.
8218                  */
8219                 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8220                 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8221
8222                 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8223                 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8224         } else
8225                 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8226                                      sdebug_ndelay);
8227 check_cond:
8228         return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8229 err_out:
8230         return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8231 }
8232
8233 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8234 {
8235         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8236
8237         spin_lock_init(&sdsc->lock);
8238
8239         return 0;
8240 }
8241
/*
 * Host template for the emulated adapter.  Note that sdebug_driver_probe()
 * patches can_queue, cmd_per_lun and (optionally) dma_boundary at probe
 * time, so this initializer only provides the static defaults.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,	/* overridden in probe */
	.this_id =		7,	/* emulated initiator's SCSI id */
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,	/* overridden in probe */
	.max_sectors =		-1U,	/* no limit */
	.max_segment_size =	-1U,	/* no limit */
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),	/* per-cmd private area */
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc =		sdebug_target_alloc,
	.target_destroy =	sdebug_target_destroy,
};
8274
/*
 * Probe callback for the pseudo bus: allocate and register one emulated
 * SCSI host for @dev.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): this mutates the shared, static sdebug_driver_template
 * and the module globals submit_queues/poll_queues before registering the
 * host; the comment below assumes all hosts are probed with the same
 * module parameters so every probe makes the same adjustments.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	/* Patch the shared template from module parameters before alloc. */
	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	/* More hardware queues than CPUs is pointless; clamp (module-wide). */
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;	/* single shared tag space */

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			 my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;	/* default, read and poll maps */

	sdbg_host->shost = hpnt;
	/* Leave room so the initiator's own id does not eat a target slot. */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;	/* allow REPORT LUNS wlun */

	hprot = 0;

	/* Map the emulated DIF type to the host protection capability bits. */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	/* guard=1 selects IP checksum, anything else T10 CRC */
	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;	/* NOTE(review): discards the real errno */
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
8399
8400 static void sdebug_driver_remove(struct device *dev)
8401 {
8402         struct sdebug_host_info *sdbg_host;
8403         struct sdebug_dev_info *sdbg_devinfo, *tmp;
8404
8405         sdbg_host = dev_to_sdebug_host(dev);
8406
8407         scsi_remove_host(sdbg_host->shost);
8408
8409         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8410                                  dev_list) {
8411                 list_del(&sdbg_devinfo->dev_list);
8412                 kfree(sdbg_devinfo->zstate);
8413                 kfree(sdbg_devinfo);
8414         }
8415
8416         scsi_host_put(sdbg_host->shost);
8417 }
8418
/*
 * The fake "pseudo" bus that the emulated adapters hang off; its probe and
 * remove callbacks create and tear down the emulated SCSI hosts.
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};