drivers/scsi/scsi_debug.c — SCSI debug (simulated host adapter) driver
[linux-2.6-microblaze.git] / drivers / scsi / scsi_debug.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
13  */
14
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18 #include <linux/module.h>
19
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45
46 #include <net/checksum.h>
47
48 #include <asm/unaligned.h>
49
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58
59 #include "sd.h"
60 #include "scsi_logging.h"
61
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0189"   /* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200421";
65
66 #define MY_NAME "scsi_debug"
67
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW     0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_UUID_CTL 0
155 #define JDELAY_OVERRIDDEN -9999
156
157 /* Default parameters for ZBC drives */
158 #define DEF_ZBC_ZONE_SIZE_MB    128
159 #define DEF_ZBC_MAX_OPEN_ZONES  8
160 #define DEF_ZBC_NR_CONV_ZONES   1
161
162 #define SDEBUG_LUN_0_VAL 0
163
164 /* bit mask values for sdebug_opts */
165 #define SDEBUG_OPT_NOISE                1
166 #define SDEBUG_OPT_MEDIUM_ERR           2
167 #define SDEBUG_OPT_TIMEOUT              4
168 #define SDEBUG_OPT_RECOVERED_ERR        8
169 #define SDEBUG_OPT_TRANSPORT_ERR        16
170 #define SDEBUG_OPT_DIF_ERR              32
171 #define SDEBUG_OPT_DIX_ERR              64
172 #define SDEBUG_OPT_MAC_TIMEOUT          128
173 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
174 #define SDEBUG_OPT_Q_NOISE              0x200
175 #define SDEBUG_OPT_ALL_TSF              0x400
176 #define SDEBUG_OPT_RARE_TSF             0x800
177 #define SDEBUG_OPT_N_WCE                0x1000
178 #define SDEBUG_OPT_RESET_NOISE          0x2000
179 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
180 #define SDEBUG_OPT_HOST_BUSY            0x8000
181 #define SDEBUG_OPT_CMD_ABORT            0x10000
182 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
183                               SDEBUG_OPT_RESET_NOISE)
184 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
185                                   SDEBUG_OPT_TRANSPORT_ERR | \
186                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
187                                   SDEBUG_OPT_SHORT_TRANSFER | \
188                                   SDEBUG_OPT_HOST_BUSY | \
189                                   SDEBUG_OPT_CMD_ABORT)
190 /* When "every_nth" > 0 then modulo "every_nth" commands:
191  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
192  *   - a RECOVERED_ERROR is simulated on successful read and write
193  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
194  *   - a TRANSPORT_ERROR is simulated on successful read and write
195  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
196  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
197  *     CMD_ABORT
198  *
199  * When "every_nth" < 0 then after "- every_nth" commands the selected
200  * error will be injected. The error will be injected on every subsequent
201  * command until some other action occurs; for example, the user writing
202  * a new value (other than -1 or 1) to every_nth:
203  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
204  */
205
206 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
207  * priority order. In the subset implemented here lower numbers have higher
208  * priority. The UA numbers should be a sequence starting from 0 with
209  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
210 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
211 #define SDEBUG_UA_BUS_RESET 1
212 #define SDEBUG_UA_MODE_CHANGED 2
213 #define SDEBUG_UA_CAPACITY_CHANGED 3
214 #define SDEBUG_UA_LUNS_CHANGED 4
215 #define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
216 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
217 #define SDEBUG_NUM_UAS 7
218
219 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
220  * sector on read commands: */
221 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
222 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
223
224 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
225  * or "peripheral device" addressing (value 0) */
226 #define SAM2_LUN_ADDRESS_METHOD 0
227
228 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
229  * (for response) per submit queue at one time. Can be reduced by max_queue
230  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
231  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
232  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
233  * but cannot exceed SDEBUG_CANQUEUE .
234  */
235 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
236 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
237 #define DEF_CMD_PER_LUN  255
238
239 #define F_D_IN                  1
240 #define F_D_OUT                 2
241 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
242 #define F_D_UNKN                8
243 #define F_RL_WLUN_OK            0x10
244 #define F_SKIP_UA               0x20
245 #define F_DELAY_OVERR           0x40
246 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
247 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
248 #define F_INV_OP                0x200
249 #define F_FAKE_RW               0x400
250 #define F_M_ACCESS              0x800   /* media access */
251 #define F_SSU_DELAY             0x1000
252 #define F_SYNC_DELAY            0x2000
253
254 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
255 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
256 #define FF_SA (F_SA_HIGH | F_SA_LOW)
257 #define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
258
259 #define SDEBUG_MAX_PARTS 4
260
261 #define SDEBUG_MAX_CMD_LEN 32
262
263 #define SDEB_XA_NOT_IN_USE XA_MARK_1
264
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,	/* conventional: randomly writable */
	ZBC_ZONE_TYPE_SWR	= 0x2,	/* sequential write required */
	ZBC_ZONE_TYPE_SWP	= 0x3,	/* sequential write preferred */
};
271
/* Zone conditions; enumeration names taken from table 26, zbcr05.
 * Note the out-of-order values of ZC5/ZC6 (0xe/0xd) follow the spec. */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
283
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;	/* conventional or sequential */
	enum sdebug_z_cond z_cond;	/* empty, open, closed, full, ... */
	bool z_non_seq_resource;	/* zone counted as a non-seq resource */
	unsigned int z_size;		/* zone size */
	sector_t z_start;		/* first sector of the zone */
	sector_t z_wp;			/* current write pointer position */
};
292
/* Per logical unit (LU) state for one simulated device */
struct sdebug_dev_info {
	struct list_head dev_list;	/* on owning host's dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;			/* per-LU uuid */
	struct sdebug_host_info *sdbg_host;	/* owning pseudo host */
	unsigned long uas_bm[1];	/* pending unit attentions, SDEBUG_UA_* bits */
	atomic_t num_in_q;		/* commands currently queued for this LU */
	atomic_t stopped;		/* non-zero while unit stopped/not ready */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;		/* zone size */
	unsigned int zsize_shift;	/* zone size as a power-of-2 shift */
	unsigned int nr_zones;		/* total zones on the device */
	unsigned int nr_conv_zones;	/* leading conventional zones */
	unsigned int nr_imp_open;	/* zones currently implicitly open */
	unsigned int nr_exp_open;	/* zones currently explicitly open */
	unsigned int nr_closed;		/* zones in the closed condition */
	unsigned int max_open;		/* limit on concurrently open zones */
	struct sdeb_zone_state *zstate;	/* per-zone state, nr_zones elements */
};
317
/* One instance per simulated SCSI host */
struct sdebug_host_info {
	struct list_head host_list;	/* on the driver-wide host list */
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;	/* associated mid-level host object */
	struct device dev;	/* embedded; see to_sdebug_host() */
	struct list_head dev_info_list;	/* sdebug_dev_info LUs on this host */
};
325
/* Backing store for simulated media. There is an xarray of pointers to
 * this struct's objects, one per host (indexed by si_idx above). */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
333
/* Map an embedded struct device back to its owning sdebug_host_info */
#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

/* How a deferred command completion is driven: not at all, by hrtimer,
 * or by a workqueue item (see struct sdebug_defer below). */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};
339
/* Bookkeeping for one deferred (delayed) command response */
struct sdebug_defer {
	struct hrtimer hrt;	/* used when defer_t == SDEB_DEFER_HRT */
	struct execute_work ew;	/* used when defer_t == SDEB_DEFER_WQ */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu the command was submitted on */
	bool init_hrt;	/* true once hrt has been initialized */
	bool init_wq;	/* true once ew has been initialized */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
351
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred completion state, if any */
	struct scsi_cmnd *a_cmnd;	/* the queued mid-level command */
	/* inj_* bits: per-command error injections selected via the
	 * SDEBUG_OPT_* / every_nth machinery described above. */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
	unsigned int inj_cmd_abort:1;
};
366
/* One submission queue's worth of in-flight commands (see SDEBUG_CANQUEUE) */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* busy bits for qc_arr */
	spinlock_t qc_lock;	/* guards this queue's state */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
373
/* Driver-wide event counters */
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
378
/* Describes how one cdb (or a family of related cdbs) is checked and
 * serviced; see opcode_info_arr[] and the *_iarr overflow arrays below. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of F_* flags (e.g. F_D_IN) */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
390
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes,
 * which select an opcode_info_arr[] entry (via opcode_ind_arr[] below). */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
427
428
/*
 * Maps cdb[0] (the SCSI opcode) onto an SDEB_I_* index. A zero entry
 * means the opcode is not handled (index 0 == SDEB_I_INVALID_OPCODE).
 */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
473
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000

/* Forward declarations of the per-command response routines; these are
 * wired up through the pfp members of the opcode_info_t tables below. */
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

/* Host and backing-store add/remove helpers */
static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
518
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
533
/* Overflow entries for SDEB_I_READ; READ(16) lives in opcode_info_arr[] */
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
544
/* Overflow entries for SDEB_I_WRITE; WRITE(16) lives in opcode_info_arr[] */
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
556
/* Overflow entry for SDEB_I_VERIFY; VERIFY(16) lives in opcode_info_arr[] */
static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

/* Overflow entry for SERVICE ACTION IN(16), keyed by service action */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};
568
/* Overflow entries for VARIABLE LENGTH (0x7f) cdbs, keyed by the (high)
 * service action; READ(32) lives in opcode_info_arr[] */
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
577
/* Overflow entries for MAINTENANCE IN (0xa3), keyed by service action */
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
586
/* Overflow entry for SDEB_I_WRITE_SAME */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
592
/* Overflow entries for SDEB_I_RESERVE / SDEB_I_RELEASE (6 byte forms) */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
602
/* Overflow entries for SDEB_I_SYNC_CACHE / SDEB_I_PRE_FETCH (16 byte forms) */
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | F_M_ACCESS, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};
614
/* Overflow entries for ZONE OUT (0x94), distinguished by the service
 * action in cdb byte 1 (F_SA_LOW) */
static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};
626
/* Overflow entry for ZONE IN (0x95), keyed by service action.
 * NOTE(review): pfp is NULL here even though the comment says REPORT
 * ZONES — confirm against the SDEB_I_ZONE_IN opcode_info_arr entry. */
static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
632
633
634 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
635  * plus the terminating elements for logic that scans this table such as
636  * REPORT SUPPORTED OPERATION CODES. */
637 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
638 /* 0 */
639         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,    /* unknown opcodes */
640             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
642             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
643         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
644             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
645              0, 0} },                                   /* REPORT LUNS */
646         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
647             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
648         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
649             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
650 /* 5 */
651         {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,      /* MODE SENSE(10) */
652             resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
653                 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
654         {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,    /* MODE SELECT(10) */
655             resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
656                 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
657         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,      /* LOG SENSE */
658             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
659              0, 0, 0} },
660         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
661             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
662              0, 0} },
663         {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
664             resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
665             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
666 /* 10 */
667         {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
668             resp_write_dt0, write_iarr,                 /* WRITE(16) */
669                 {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
670                  0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
671         {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
672             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
673         {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
674             resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
675                 {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
676                  0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
677         {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
678             NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
679             0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
680         {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
681             resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
682                 maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
683                                 0xff, 0, 0xc7, 0, 0, 0, 0} },
684 /* 15 */
685         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
686             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
687         {ARRAY_SIZE(verify_iarr), 0x8f, 0,
688             F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,   /* VERIFY(16) */
689             verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
690                           0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
691         {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
692             resp_read_dt0, vl_iarr,     /* VARIABLE LENGTH, READ(32) */
693             {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
694              0xff, 0xff} },
695         {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
696             NULL, reserve_iarr, /* RESERVE(10) <no response function> */
697             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
698              0} },
699         {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
700             NULL, release_iarr, /* RELEASE(10) <no response function> */
701             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
702              0} },
703 /* 20 */
704         {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
705             {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
706         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
707             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
708         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
709             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
710         {0, 0x1d, F_D_OUT, 0, NULL, NULL,       /* SEND DIAGNOSTIC */
711             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
712         {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
713             {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
714 /* 25 */
715         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
716             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
717              0, 0, 0, 0} },                     /* WRITE_BUFFER */
718         {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
719             resp_write_same_10, write_same_iarr,        /* WRITE SAME(10) */
720                 {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
721                  0, 0, 0, 0, 0} },
722         {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
723             resp_sync_cache, sync_cache_iarr,
724             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
725              0, 0, 0, 0} },                     /* SYNC_CACHE (10) */
726         {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
727             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
728              0, 0xff, 0x3f, 0xc7} },            /* COMPARE AND WRITE */
729         {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | F_M_ACCESS,
730             resp_pre_fetch, pre_fetch_iarr,
731             {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
732              0, 0, 0, 0} },                     /* PRE-FETCH (10) */
733
734 /* 30 */
735         {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW,
736             resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
737                 {16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
738                  0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
739         {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_D_IN,
740             resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
741                 {16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
742                  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
743 /* sentinel */
744         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
745             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
746 };
747
/*
 * Module-parameter backing variables.  Defaults come from the DEF_*
 * macros; most of these are also readable/writable at runtime via sysfs.
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* logical block provisioning knobs (see scsi_debug_lbp() below) */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
805
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* all simulated hosts; guarded by sdebug_host_list_lock */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* backing (ram) stores, indexed by store id */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* counters reported back through sysfs/procfs */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* pre-built scsi_cmnd result values (driver/host bytes OR'd with status) */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
868
869
870 /* Only do the extra work involved in logical block provisioning if one or
871  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
872  * real reads and writes (i.e. not skipping them for speed).
873  */
874 static inline bool scsi_debug_lbp(void)
875 {
876         return 0 == sdebug_fake_rw &&
877                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
878 }
879
880 static void *lba2fake_store(struct sdeb_store_info *sip,
881                             unsigned long long lba)
882 {
883         struct sdeb_store_info *lsip = sip;
884
885         lba = do_div(lba, sdebug_store_sectors);
886         if (!sip || !sip->storep) {
887                 WARN_ON_ONCE(true);
888                 lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
889         }
890         return lsip->storep + lba * sdebug_sector_size;
891 }
892
/* Return the address of the T10 PI (DIF) tuple for @sector within the
 * protection-information store of @sip; the sector number wraps modulo
 * sdebug_store_sectors (sector_div() leaves the remainder in 'sector').
 */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
900
901 static void sdebug_max_tgts_luns(void)
902 {
903         struct sdebug_host_info *sdbg_host;
904         struct Scsi_Host *hpnt;
905
906         spin_lock(&sdebug_host_list_lock);
907         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
908                 hpnt = sdbg_host->shost;
909                 if ((hpnt->this_id >= 0) &&
910                     (sdebug_num_tgts > hpnt->this_id))
911                         hpnt->max_id = sdebug_num_tgts + 1;
912                 else
913                         hpnt->max_id = sdebug_num_tgts;
914                 /* sdebug_max_luns; */
915                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
916         }
917         spin_unlock(&sdebug_host_list_lock);
918 }
919
/* Whether the invalid field being reported is in the CDB or data-out. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* SENSE KEY SPECIFIC field (SPC "field pointer") */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	/* additional sense code depends on where the bad field was found */
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: sense-key-specific data is valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer below is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field pointer (byte) */
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key-specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;		/* grow additional sense length */
		sbuff[sl] = 0x2;	/* descriptor type: sense key specific */
		sbuff[sl + 1] = 0x6;	/* additional length */
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15-17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
962
963 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
964 {
965         unsigned char *sbuff;
966
967         sbuff = scp->sense_buffer;
968         if (!sbuff) {
969                 sdev_printk(KERN_ERR, scp->device,
970                             "%s: sense_buffer is NULL\n", __func__);
971                 return;
972         }
973         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
974
975         scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
976
977         if (sdebug_verbose)
978                 sdev_printk(KERN_INFO, scp->device,
979                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
980                             my_name, key, asc, asq);
981 }
982
/* Set ILLEGAL REQUEST + INVALID COMMAND OPERATION CODE sense on @scp. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
987
988 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
989                             void __user *arg)
990 {
991         if (sdebug_verbose) {
992                 if (0x1261 == cmd)
993                         sdev_printk(KERN_INFO, dev,
994                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
995                 else if (0x5331 == cmd)
996                         sdev_printk(KERN_INFO, dev,
997                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
998                                     __func__);
999                 else
1000                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1001                                     __func__, cmd);
1002         }
1003         return -EINVAL;
1004         /* return -ENOTTY; // correct return but upsets fdisk */
1005 }
1006
1007 static void config_cdb_len(struct scsi_device *sdev)
1008 {
1009         switch (sdebug_cdb_len) {
1010         case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1011                 sdev->use_10_for_rw = false;
1012                 sdev->use_16_for_rw = false;
1013                 sdev->use_10_for_ms = false;
1014                 break;
1015         case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1016                 sdev->use_10_for_rw = true;
1017                 sdev->use_16_for_rw = false;
1018                 sdev->use_10_for_ms = false;
1019                 break;
1020         case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1021                 sdev->use_10_for_rw = true;
1022                 sdev->use_16_for_rw = false;
1023                 sdev->use_10_for_ms = true;
1024                 break;
1025         case 16:
1026                 sdev->use_10_for_rw = false;
1027                 sdev->use_16_for_rw = true;
1028                 sdev->use_10_for_ms = true;
1029                 break;
1030         case 32: /* No knobs to suggest this so same as 16 for now */
1031                 sdev->use_10_for_rw = false;
1032                 sdev->use_16_for_rw = true;
1033                 sdev->use_10_for_ms = true;
1034                 break;
1035         default:
1036                 pr_warn("unexpected cdb_len=%d, force to 10\n",
1037                         sdebug_cdb_len);
1038                 sdev->use_10_for_rw = true;
1039                 sdev->use_16_for_rw = false;
1040                 sdev->use_10_for_ms = false;
1041                 sdebug_cdb_len = 10;
1042                 break;
1043         }
1044 }
1045
/*
 * Apply config_cdb_len() to every scsi_device on every simulated host.
 *
 * NOTE(review): shost_for_each_device() takes/drops device references
 * and may sleep; confirm that is safe while holding the spinlock
 * sdebug_host_list_lock.
 */
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
1061
1062 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1063 {
1064         struct sdebug_host_info *sdhp;
1065         struct sdebug_dev_info *dp;
1066
1067         spin_lock(&sdebug_host_list_lock);
1068         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1069                 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1070                         if ((devip->sdbg_host == dp->sdbg_host) &&
1071                             (devip->target == dp->target))
1072                                 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1073                 }
1074         }
1075         spin_unlock(&sdebug_host_list_lock);
1076 }
1077
/*
 * If any unit attention (UA) is pending on @devip, build sense data for
 * the first (lowest-numbered) pending UA, clear that UA bit and return
 * check_condition_result; return 0 when no UA is pending.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, only when verbose */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1157
1158 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1159 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1160                                 int arr_len)
1161 {
1162         int act_len;
1163         struct scsi_data_buffer *sdb = &scp->sdb;
1164
1165         if (!sdb->length)
1166                 return 0;
1167         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1168                 return DID_ERROR << 16;
1169
1170         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1171                                       arr, arr_len);
1172         scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1173
1174         return 0;
1175 }
1176
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset is beyond the data-in buffer */
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	/* only ever shrink resid: out-of-order calls must not grow it */
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}
1203
1204 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1205  * 'arr' or -1 if error.
1206  */
1207 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1208                                int arr_len)
1209 {
1210         if (!scsi_bufflen(scp))
1211                 return 0;
1212         if (scp->sc_data_direction != DMA_TO_DEVICE)
1213                 return -1;
1214
1215         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1216 }
1217
1218
/* INQUIRY standard-data strings, space padded to their field widths. */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1226
/* Device identification VPD page. Returns number of bytes placed in arr
 * (which starts at the page's first designation descriptor, i.e. byte 4).
 * @port_group_id: value placed in the target port group designator
 * @target_dev_id: basis for the target device/port NAA identifiers
 * @dev_id_num: logical unit id; if < 0 the lu designators are omitted
 * @dev_id_str/@dev_id_str_len: ASCII tail of the T10 vendor designator
 * @lu_name: 16-byte UUID, used only when sdebug_uuid_ctl is set
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1314
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page: copy the canned table
 * into @arr and return its length.
 */
static int inquiry_vpd_84(unsigned char *arr)
{
	const int len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return len;
}
1327
/* Append one network-services descriptor to @arr: a 4 byte header
 * followed by the URL, null terminated and zero padded so the payload
 * is a multiple of 4 bytes.  Returns the number of bytes written.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, int assoc_service,
				const char *url)
{
	int num = 0;
	int olen = strlen(url);
	int plen = olen + 1;		/* allow for trailing NULL */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* round up to 4 byte multiple */
	arr[num++] = assoc_service;	/* association + service type */
	arr[num++] = 0x0;		/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;		/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page.  Returns the number of bytes
 * placed in @arr.  The two descriptors previously built by duplicated
 * inline code now share inquiry_vpd_85_descr().
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* 0x1: association=lu, service type=storage configuration */
	num += inquiry_vpd_85_descr(arr + num, 0x1,
				    "https://www.kernel.org/config");
	/* 0x4: association=lu, service type=logging */
	num += inquiry_vpd_85_descr(arr + num, 0x4,
				    "http://www.kernel.org/log");
	return num;
}
1362
1363 /* SCSI ports VPD page */
1364 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1365 {
1366         int num = 0;
1367         int port_a, port_b;
1368
1369         port_a = target_dev_id + 1;
1370         port_b = port_a + 1;
1371         arr[num++] = 0x0;       /* reserved */
1372         arr[num++] = 0x0;       /* reserved */
1373         arr[num++] = 0x0;
1374         arr[num++] = 0x1;       /* relative port 1 (primary) */
1375         memset(arr + num, 0, 6);
1376         num += 6;
1377         arr[num++] = 0x0;
1378         arr[num++] = 12;        /* length tp descriptor */
1379         /* naa-5 target port identifier (A) */
1380         arr[num++] = 0x61;      /* proto=sas, binary */
1381         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1382         arr[num++] = 0x0;       /* reserved */
1383         arr[num++] = 0x8;       /* length */
1384         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1385         num += 8;
1386         arr[num++] = 0x0;       /* reserved */
1387         arr[num++] = 0x0;       /* reserved */
1388         arr[num++] = 0x0;
1389         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1390         memset(arr + num, 0, 6);
1391         num += 6;
1392         arr[num++] = 0x0;
1393         arr[num++] = 12;        /* length tp descriptor */
1394         /* naa-5 target port identifier (B) */
1395         arr[num++] = 0x61;      /* proto=sas, binary */
1396         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1397         arr[num++] = 0x0;       /* reserved */
1398         arr[num++] = 0x8;       /* length */
1399         put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1400         num += 8;
1401
1402         return num;
1403 }
1404
1405
/* Canned payload for the ATA Information VPD page (0x89), served verbatim
 * by inquiry_vpd_89(). The leading "from 4th byte" note means these bytes
 * follow the 4-byte VPD page header that the caller fills in.
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1449
1450 /* ATA Information VPD page */
1451 static int inquiry_vpd_89(unsigned char *arr)
1452 {
1453         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1454         return sizeof(vpd89_data);
1455 }
1456
1457
/* Template for the Block limits VPD page (0xb0); copied to the response
 * buffer by inquiry_vpd_b0(), which then overwrites the variable fields.
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1464
1465 /* Block limits VPD page (SBC-3) */
1466 static int inquiry_vpd_b0(unsigned char *arr)
1467 {
1468         unsigned int gran;
1469
1470         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1471
1472         /* Optimal transfer length granularity */
1473         if (sdebug_opt_xferlen_exp != 0 &&
1474             sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1475                 gran = 1 << sdebug_opt_xferlen_exp;
1476         else
1477                 gran = 1 << sdebug_physblk_exp;
1478         put_unaligned_be16(gran, arr + 2);
1479
1480         /* Maximum Transfer Length */
1481         if (sdebug_store_sectors > 0x400)
1482                 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1483
1484         /* Optimal Transfer Length */
1485         put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1486
1487         if (sdebug_lbpu) {
1488                 /* Maximum Unmap LBA Count */
1489                 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1490
1491                 /* Maximum Unmap Block Descriptor Count */
1492                 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1493         }
1494
1495         /* Unmap Granularity Alignment */
1496         if (sdebug_unmap_alignment) {
1497                 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1498                 arr[28] |= 0x80; /* UGAVALID */
1499         }
1500
1501         /* Optimal Unmap Granularity */
1502         put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1503
1504         /* Maximum WRITE SAME Length */
1505         put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1506
1507         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1508
1509         return sizeof(vpdb0_data);
1510 }
1511
1512 /* Block device characteristics VPD page (SBC-3) */
1513 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1514 {
1515         memset(arr, 0, 0x3c);
1516         arr[0] = 0;
1517         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1518         arr[2] = 0;
1519         arr[3] = 5;     /* less than 1.8" */
1520         if (devip->zmodel == BLK_ZONED_HA)
1521                 arr[4] = 1 << 4;        /* zoned field = 01b */
1522
1523         return 0x3c;
1524 }
1525
1526 /* Logical block provisioning VPD page (SBC-4) */
1527 static int inquiry_vpd_b2(unsigned char *arr)
1528 {
1529         memset(arr, 0, 0x4);
1530         arr[0] = 0;                     /* threshold exponent */
1531         if (sdebug_lbpu)
1532                 arr[1] = 1 << 7;
1533         if (sdebug_lbpws)
1534                 arr[1] |= 1 << 6;
1535         if (sdebug_lbpws10)
1536                 arr[1] |= 1 << 5;
1537         if (sdebug_lbprz && scsi_debug_lbp())
1538                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1539         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1540         /* minimum_percentage=0; provisioning_type=0 (unknown) */
1541         /* threshold_percentage=0 */
1542         return 0x4;
1543 }
1544
1545 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1546 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1547 {
1548         memset(arr, 0, 0x3c);
1549         arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1550         /*
1551          * Set Optimal number of open sequential write preferred zones and
1552          * Optimal number of non-sequentially written sequential write
1553          * preferred zones fields to 'not reported' (0xffffffff). Leave other
1554          * fields set to zero, apart from Max. number of open swrz_s field.
1555          */
1556         put_unaligned_be32(0xffffffff, &arr[4]);
1557         put_unaligned_be32(0xffffffff, &arr[8]);
1558         if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1559                 put_unaligned_be32(devip->max_open, &arr[12]);
1560         else
1561                 put_unaligned_be32(0xffffffff, &arr[12]);
1562         return 0x3c;
1563 }
1564
1565 #define SDEBUG_LONG_INQ_SZ 96
1566 #define SDEBUG_MAX_INQ_ARR_SZ 584
1567
/* Respond to an INQUIRY command. With the EVPD bit set, dispatch to the
 * inquiry_vpd_*() helpers for the requested VPD page; otherwise build the
 * standard inquiry data. CMDDT (obsolete) is rejected with ILLEGAL REQUEST.
 * Returns 0 on success, check_condition_result on a CDB error, or
 * DID_REQUEUE<<16 if the response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;	/* ask mid-layer to retry */
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	/* Byte 0: peripheral qualifier (PQ) + peripheral device type (PDT) */
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		/* cmd[2] selects the VPD page code */
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			arr[5] = 0x7;	/* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;  /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];	/*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp response to allocation length and buffer size */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1729
/* Informational exceptions control mode page (page code 0x1c).
 * resp_requests() below consults byte 2 bit 0x4 (TEST, per its comments)
 * and the low nibble of byte 3 (MRIE field).
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1732
/* Respond to REQUEST SENSE. When the informational exceptions mode page
 * has the TEST bit set with MRIE==6, fabricate a THRESHOLD EXCEEDED sense.
 * Otherwise report the stashed sense buffer, converting between fixed
 * (0x70) and descriptor (0x72) formats according to the DESC bit (cmd[1]
 * bit 0). Clears the stashed sense afterwards via mk_sense_buffer().
 */
static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *sbuff;
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
	bool dsense;
	int len = 18;	/* fixed-format length; 8 for descriptor format */

	memset(arr, 0, sizeof(arr));
	dsense = !!(cmd[1] & 1);	/* DESC bit: descriptor format wanted */
	sbuff = scp->sense_buffer;
	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* TEST set and MRIE==6 */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* TEST set and MRIE==6 */
		}
	} else {
		/* report stashed sense, converting format if needed */
		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
			;	/* have sense and formats match */
		else if (arr[0] <= 0x70) {
			/* no valid sense stashed: synthesize an empty one */
			if (dsense) {
				memset(arr, 0, 8);
				arr[0] = 0x72;
				len = 8;
			} else {
				memset(arr, 0, 18);
				arr[0] = 0x70;
				arr[7] = 0xa;
			}
		} else if (dsense) {
			/* convert stashed fixed format to descriptor */
			memset(arr, 0, 8);
			arr[0] = 0x72;
			arr[1] = sbuff[2];     /* sense key */
			arr[2] = sbuff[12];    /* asc */
			arr[3] = sbuff[13];    /* ascq */
			len = 8;
		} else {
			/* convert stashed descriptor format to fixed */
			memset(arr, 0, 18);
			arr[0] = 0x70;
			arr[2] = sbuff[1];
			arr[7] = 0xa;
			arr[12] = sbuff[1];
			arr[13] = sbuff[3];
		}

	}
	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
	return fill_from_dev_buffer(scp, arr, len);
}
1793
1794 static int resp_start_stop(struct scsi_cmnd *scp,
1795                            struct sdebug_dev_info *devip)
1796 {
1797         unsigned char *cmd = scp->cmnd;
1798         int power_cond, stop;
1799         bool changing;
1800
1801         power_cond = (cmd[4] & 0xf0) >> 4;
1802         if (power_cond) {
1803                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1804                 return check_condition_result;
1805         }
1806         stop = !(cmd[4] & 1);
1807         changing = atomic_read(&devip->stopped) == !stop;
1808         atomic_xchg(&devip->stopped, stop);
1809         if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1810                 return SDEG_RES_IMMED_MASK;
1811         else
1812                 return 0;
1813 }
1814
1815 static sector_t get_sdebug_capacity(void)
1816 {
1817         static const unsigned int gibibyte = 1073741824;
1818
1819         if (sdebug_virtual_gb > 0)
1820                 return (sector_t)sdebug_virtual_gb *
1821                         (gibibyte / sdebug_sector_size);
1822         else
1823                 return sdebug_store_sectors;
1824 }
1825
1826 #define SDEBUG_READCAP_ARR_SZ 8
1827 static int resp_readcap(struct scsi_cmnd *scp,
1828                         struct sdebug_dev_info *devip)
1829 {
1830         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1831         unsigned int capac;
1832
1833         /* following just in case virtual_gb changed */
1834         sdebug_capacity = get_sdebug_capacity();
1835         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1836         if (sdebug_capacity < 0xffffffff) {
1837                 capac = (unsigned int)sdebug_capacity - 1;
1838                 put_unaligned_be32(capac, arr + 0);
1839         } else
1840                 put_unaligned_be32(0xffffffff, arr + 0);
1841         put_unaligned_be16(sdebug_sector_size, arr + 6);
1842         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1843 }
1844
1845 #define SDEBUG_READCAP16_ARR_SZ 32
/* Respond to READ CAPACITY(16): 64-bit last LBA, sector size, physical
 * block exponent, lowest aligned LBA, plus LBPME/LBPRZ provisioning bits
 * and the protection (P_TYPE/PROT_EN) byte when DIF is configured.
 */
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1882
1883 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1884
/* Respond to REPORT TARGET PORT GROUPS: two single-port groups matching
 * the two ports advertised in VPD page 0x88, with the second group's port
 * reported as unavailable. Returns DID_REQUEUE<<16 on allocation failure.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);	/* allocation length */
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;	/* leave room for the 4-byte return data length header */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min_t(int, alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1960
1961 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1962                              struct sdebug_dev_info *devip)
1963 {
1964         bool rctd;
1965         u8 reporting_opts, req_opcode, sdeb_i, supp;
1966         u16 req_sa, u;
1967         u32 alloc_len, a_len;
1968         int k, offset, len, errsts, count, bump, na;
1969         const struct opcode_info_t *oip;
1970         const struct opcode_info_t *r_oip;
1971         u8 *arr;
1972         u8 *cmd = scp->cmnd;
1973
1974         rctd = !!(cmd[2] & 0x80);
1975         reporting_opts = cmd[2] & 0x7;
1976         req_opcode = cmd[3];
1977         req_sa = get_unaligned_be16(cmd + 4);
1978         alloc_len = get_unaligned_be32(cmd + 6);
1979         if (alloc_len < 4 || alloc_len > 0xffff) {
1980                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1981                 return check_condition_result;
1982         }
1983         if (alloc_len > 8192)
1984                 a_len = 8192;
1985         else
1986                 a_len = alloc_len;
1987         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1988         if (NULL == arr) {
1989                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1990                                 INSUFF_RES_ASCQ);
1991                 return check_condition_result;
1992         }
1993         switch (reporting_opts) {
1994         case 0: /* all commands */
1995                 /* count number of commands */
1996                 for (count = 0, oip = opcode_info_arr;
1997                      oip->num_attached != 0xff; ++oip) {
1998                         if (F_INV_OP & oip->flags)
1999                                 continue;
2000                         count += (oip->num_attached + 1);
2001                 }
2002                 bump = rctd ? 20 : 8;
2003                 put_unaligned_be32(count * bump, arr);
2004                 for (offset = 4, oip = opcode_info_arr;
2005                      oip->num_attached != 0xff && offset < a_len; ++oip) {
2006                         if (F_INV_OP & oip->flags)
2007                                 continue;
2008                         na = oip->num_attached;
2009                         arr[offset] = oip->opcode;
2010                         put_unaligned_be16(oip->sa, arr + offset + 2);
2011                         if (rctd)
2012                                 arr[offset + 5] |= 0x2;
2013                         if (FF_SA & oip->flags)
2014                                 arr[offset + 5] |= 0x1;
2015                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2016                         if (rctd)
2017                                 put_unaligned_be16(0xa, arr + offset + 8);
2018                         r_oip = oip;
2019                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2020                                 if (F_INV_OP & oip->flags)
2021                                         continue;
2022                                 offset += bump;
2023                                 arr[offset] = oip->opcode;
2024                                 put_unaligned_be16(oip->sa, arr + offset + 2);
2025                                 if (rctd)
2026                                         arr[offset + 5] |= 0x2;
2027                                 if (FF_SA & oip->flags)
2028                                         arr[offset + 5] |= 0x1;
2029                                 put_unaligned_be16(oip->len_mask[0],
2030                                                    arr + offset + 6);
2031                                 if (rctd)
2032                                         put_unaligned_be16(0xa,
2033                                                            arr + offset + 8);
2034                         }
2035                         oip = r_oip;
2036                         offset += bump;
2037                 }
2038                 break;
2039         case 1: /* one command: opcode only */
2040         case 2: /* one command: opcode plus service action */
2041         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2042                 sdeb_i = opcode_ind_arr[req_opcode];
2043                 oip = &opcode_info_arr[sdeb_i];
2044                 if (F_INV_OP & oip->flags) {
2045                         supp = 1;
2046                         offset = 4;
2047                 } else {
2048                         if (1 == reporting_opts) {
2049                                 if (FF_SA & oip->flags) {
2050                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2051                                                              2, 2);
2052                                         kfree(arr);
2053                                         return check_condition_result;
2054                                 }
2055                                 req_sa = 0;
2056                         } else if (2 == reporting_opts &&
2057                                    0 == (FF_SA & oip->flags)) {
2058                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2059                                 kfree(arr);     /* point at requested sa */
2060                                 return check_condition_result;
2061                         }
2062                         if (0 == (FF_SA & oip->flags) &&
2063                             req_opcode == oip->opcode)
2064                                 supp = 3;
2065                         else if (0 == (FF_SA & oip->flags)) {
2066                                 na = oip->num_attached;
2067                                 for (k = 0, oip = oip->arrp; k < na;
2068                                      ++k, ++oip) {
2069                                         if (req_opcode == oip->opcode)
2070                                                 break;
2071                                 }
2072                                 supp = (k >= na) ? 1 : 3;
2073                         } else if (req_sa != oip->sa) {
2074                                 na = oip->num_attached;
2075                                 for (k = 0, oip = oip->arrp; k < na;
2076                                      ++k, ++oip) {
2077                                         if (req_sa == oip->sa)
2078                                                 break;
2079                                 }
2080                                 supp = (k >= na) ? 1 : 3;
2081                         } else
2082                                 supp = 3;
2083                         if (3 == supp) {
2084                                 u = oip->len_mask[0];
2085                                 put_unaligned_be16(u, arr + 2);
2086                                 arr[4] = oip->opcode;
2087                                 for (k = 1; k < u; ++k)
2088                                         arr[4 + k] = (k < 16) ?
2089                                                  oip->len_mask[k] : 0xff;
2090                                 offset = 4 + u;
2091                         } else
2092                                 offset = 4;
2093                 }
2094                 arr[1] = (rctd ? 0x80 : 0) | supp;
2095                 if (rctd) {
2096                         put_unaligned_be16(0xa, arr + offset);
2097                         offset += 12;
2098                 }
2099                 break;
2100         default:
2101                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2102                 kfree(arr);
2103                 return check_condition_result;
2104         }
2105         offset = (offset < a_len) ? offset : a_len;
2106         len = (offset < alloc_len) ? offset : alloc_len;
2107         errsts = fill_from_dev_buffer(scp, arr, len);
2108         kfree(arr);
2109         return errsts;
2110 }
2111
2112 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2113                           struct sdebug_dev_info *devip)
2114 {
2115         bool repd;
2116         u32 alloc_len, len;
2117         u8 arr[16];
2118         u8 *cmd = scp->cmnd;
2119
2120         memset(arr, 0, sizeof(arr));
2121         repd = !!(cmd[2] & 0x80);
2122         alloc_len = get_unaligned_be32(cmd + 6);
2123         if (alloc_len < 4) {
2124                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2125                 return check_condition_result;
2126         }
2127         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
2128         arr[1] = 0x1;           /* ITNRS */
2129         if (repd) {
2130                 arr[3] = 0xc;
2131                 len = 16;
2132         } else
2133                 len = 4;
2134
2135         len = (len < alloc_len) ? len : alloc_len;
2136         return fill_from_dev_buffer(scp, arr, len);
2137 }
2138
2139 /* <<Following mode page info copied from ST318451LW>> */
2140
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{
        /* Read-Write Error Recovery mode page (page code 0x1) */
        static const unsigned char def_vals[] = {
                0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff,
        };
        const int n = sizeof(def_vals);

        memcpy(p, def_vals, n);
        if (pcontrol == 1)              /* changeable values: all zero */
                memset(p + 2, 0, n - 2);
        return n;
}
2151
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{
        /* Disconnect-Reconnect mode page (page code 0x2) */
        static const unsigned char def_vals[] = {
                0x2, 0xe, 128, 128, 0, 10, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0,
        };
        const int n = sizeof(def_vals);

        memcpy(p, def_vals, n);
        if (pcontrol == 1)              /* changeable values: all zero */
                memset(p + 2, 0, n - 2);
        return n;
}
2162
/*
 * Format Device mode page (page code 0x3) for MODE SENSE.  Reflects the
 * module's emulated geometry (sectors per track, logical block size) and
 * the removable-media setting.
 */
static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{       /* Format device page for mode_sense */
        unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
                                     0, 0, 0, 0, 0, 0, 0, 0,
                                     0, 0, 0, 0, 0x40, 0, 0, 0};

        memcpy(p, format_pg, sizeof(format_pg));
        /* bytes 10-11: sectors per track; bytes 12-13: data bytes per sector */
        put_unaligned_be16(sdebug_sectors_per, p + 10);
        put_unaligned_be16(sdebug_sector_size, p + 12);
        if (sdebug_removable)
                p[20] |= 0x20; /* should agree with INQUIRY */
        if (1 == pcontrol)
                /* changeable values: all zero */
                memset(p + 2, 0, sizeof(format_pg) - 2);
        return sizeof(format_pg);
}
2178
/*
 * Caching mode page (page code 0x8) "current values".  Shared mutable
 * state: resp_caching_pg() may clear the WCE bit at runtime, and
 * resp_mode_select() overwrites bytes 2.. when the page is MODE SELECTed.
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
                                     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
                                     0, 0, 0, 0};
2182
static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{       /* Caching page for mode_sense */
        /* changeable-values mask: only byte 2 bit 0x4 (WCE) is changeable */
        unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        /* factory default values (WCE=1) */
        unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
                0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};

        /* the N_WCE option forces write cache off in the shared current page */
        if (SDEBUG_OPT_N_WCE & sdebug_opts)
                caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
        memcpy(p, caching_pg, sizeof(caching_pg));
        if (1 == pcontrol)
                memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
        else if (2 == pcontrol)
                memcpy(p, d_caching_pg, sizeof(d_caching_pg));
        return sizeof(caching_pg);
}
2199
/*
 * Control mode page (page code 0xa) "current values".  Shared mutable
 * state: resp_ctrl_m_pg() updates bits in bytes 2 and 5 from module
 * parameters, and resp_mode_select() overwrites bytes 2.. on MODE SELECT.
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
                                    0, 0, 0x2, 0x4b};
2202
static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{       /* Control mode page for mode_sense */
        /* changeable-values mask (byte 2: 0x6) */
        unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0};
        /* factory default values */
        unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
                                     0, 0, 0x2, 0x4b};

        /* mirror current module state into the shared "current" page */
        if (sdebug_dsense)
                ctrl_m_pg[2] |= 0x4;    /* descriptor format sense data */
        else
                ctrl_m_pg[2] &= ~0x4;

        if (sdebug_ato)
                ctrl_m_pg[5] |= 0x80; /* ATO=1 */

        memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
        if (1 == pcontrol)
                memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
        else if (2 == pcontrol)
                memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
        return sizeof(ctrl_m_pg);
}
2225
2226
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{       /* Informational Exceptions control mode page for mode_sense */
        /* changeable-values mask (bytes 2-3: 0x4, 0xf) */
        unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
                                       0, 0, 0x0, 0x0};
        /* factory default values */
        unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
                                      0, 0, 0x0, 0x0};

        /* iec_m_pg holds the current (possibly MODE SELECTed) values */
        memcpy(p, iec_m_pg, sizeof(iec_m_pg));
        if (1 == pcontrol)
                memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
        else if (2 == pcontrol)
                memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
        return sizeof(iec_m_pg);
}
2241
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{
        /* SAS SSP protocol-specific port mode page (0x19), short format */
        static const unsigned char def_vals[] = {
                0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0,
        };
        const int n = sizeof(def_vals);

        memcpy(p, def_vals, n);
        if (pcontrol == 1)              /* changeable values: all zero */
                memset(p + 2, 0, n - 2);
        return n;
}
2252
2253
/*
 * SAS Phy Control And Discover mode subpage (0x19/0x1) for MODE SENSE.
 * Emits two phy descriptors; the SAS addresses and attached phy ids come
 * from the template below with per-target values patched in.
 */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
                              int target_dev_id)
{       /* SAS phy control and discover mode page for mode_sense */
        unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
                    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
                    0x2, 0, 0, 0, 0, 0, 0, 0,
                    0x88, 0x99, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0,
                    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
                    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
                    0x3, 0, 0, 0, 0, 0, 0, 0,
                    0x88, 0x99, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0,
                };
        int port_a, port_b;

        /* fill in SAS addresses at offsets 16/24 (phy 0) and 64/72 (phy 1) */
        put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
        put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
        put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
        put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
        /* per-target port identifiers */
        port_a = target_dev_id + 1;
        port_b = port_a + 1;
        memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
        /*
         * NOTE(review): these stores land on the low 32 bits of the SAS
         * addresses written at offsets 16 and 64 above, folding the port
         * number into each address — appears intentional, but confirm.
         */
        put_unaligned_be32(port_a, p + 20);
        put_unaligned_be32(port_b, p + 48 + 20);
        if (1 == pcontrol)
                /* changeable values: all zero past the subpage header */
                memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
        return sizeof(sas_pcd_m_pg);
}
2286
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{
        /* SAS SSP shared protocol-specific port mode subpage (0x59/0x2) */
        static const unsigned char def_vals[] = {
                0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
                0, 0, 0, 0, 0, 0, 0, 0,
        };
        const int n = sizeof(def_vals);

        memcpy(p, def_vals, n);
        if (pcontrol == 1)              /* keep 4-byte subpage header intact */
                memset(p + 4, 0, n - 4);
        return n;
}
2298
2299 #define SDEBUG_MAX_MSENSE_SZ 256
2300
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10).  Builds the mode parameter
 * header, the optional block descriptor (short or long-LBA form) and the
 * requested mode page(s) in arr[], then returns at most alloc_len bytes
 * via fill_from_dev_buffer().  Fails with CHECK CONDITION for PC=3
 * (saved values), unsupported subpages and unknown page codes.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
                           struct sdebug_dev_info *devip)
{
        int pcontrol, pcode, subpcode, bd_len;
        unsigned char dev_spec;
        int alloc_len, offset, len, target_dev_id;
        int target = scp->device->id;
        unsigned char *ap;
        unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
        unsigned char *cmd = scp->cmnd;
        bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

        dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
        pcontrol = (cmd[2] & 0xc0) >> 6;        /* 0=current 1=changeable 2=default 3=saved */
        pcode = cmd[2] & 0x3f;          /* mode page code */
        subpcode = cmd[3];
        msense_6 = (MODE_SENSE == cmd[0]);
        llbaa = msense_6 ? false : !!(cmd[1] & 0x10);   /* long LBA descriptors (10-byte only) */
        is_disk = (sdebug_ptype == TYPE_DISK);
        is_zbc = (devip->zmodel != BLK_ZONED_NONE);
        if ((is_disk || is_zbc) && !dbd)
                bd_len = llbaa ? 16 : 8;
        else
                bd_len = 0;
        alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
        memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
        if (0x3 == pcontrol) {  /* Saving values not supported */
                mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
                return check_condition_result;
        }
        target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
                        (devip->target * 1000) - 3;
        /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
        if (is_disk || is_zbc) {
                dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
                if (sdebug_wp)
                        dev_spec |= 0x80;
        } else
                dev_spec = 0x0;
        /* mode parameter header: 4 bytes for (6), 8 bytes for (10) */
        if (msense_6) {
                arr[2] = dev_spec;
                arr[3] = bd_len;
                offset = 4;
        } else {
                arr[3] = dev_spec;
                if (16 == bd_len)
                        arr[4] = 0x1;   /* set LONGLBA bit */
                arr[7] = bd_len;        /* assume 255 or less */
                offset = 8;
        }
        ap = arr + offset;
        if ((bd_len > 0) && (!sdebug_capacity))
                sdebug_capacity = get_sdebug_capacity();

        if (8 == bd_len) {
                /* short descriptor: 32-bit block count, capped at 0xffffffff */
                if (sdebug_capacity > 0xfffffffe)
                        put_unaligned_be32(0xffffffff, ap + 0);
                else
                        put_unaligned_be32(sdebug_capacity, ap + 0);
                put_unaligned_be16(sdebug_sector_size, ap + 6);
                offset += bd_len;
                ap = arr + offset;
        } else if (16 == bd_len) {
                /* long LBA descriptor: full 64-bit block count */
                put_unaligned_be64((u64)sdebug_capacity, ap + 0);
                put_unaligned_be32(sdebug_sector_size, ap + 12);
                offset += bd_len;
                ap = arr + offset;
        }

        /* only page 0x19 supports subpages here */
        if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
                /* TODO: Control Extension page */
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
                return check_condition_result;
        }
        bad_pcode = false;

        switch (pcode) {
        case 0x1:       /* Read-Write error recovery page, direct access */
                len = resp_err_recov_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x2:       /* Disconnect-Reconnect page, all devices */
                len = resp_disconnect_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x3:       /* Format device page, direct access */
                if (is_disk) {
                        len = resp_format_pg(ap, pcontrol, target);
                        offset += len;
                } else
                        bad_pcode = true;
                break;
        case 0x8:       /* Caching page, direct access */
                if (is_disk || is_zbc) {
                        len = resp_caching_pg(ap, pcontrol, target);
                        offset += len;
                } else
                        bad_pcode = true;
                break;
        case 0xa:       /* Control Mode page, all devices */
                len = resp_ctrl_m_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x19:      /* if spc==1 then sas phy, control+discover */
                if ((subpcode > 0x2) && (subpcode < 0xff)) {
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
                        return check_condition_result;
                }
                /* subpage 0xff means "all subpages" */
                len = 0;
                if ((0x0 == subpcode) || (0xff == subpcode))
                        len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
                if ((0x1 == subpcode) || (0xff == subpcode))
                        len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
                                                  target_dev_id);
                if ((0x2 == subpcode) || (0xff == subpcode))
                        len += resp_sas_sha_m_spg(ap + len, pcontrol);
                offset += len;
                break;
        case 0x1c:      /* Informational Exceptions Mode page, all devices */
                len = resp_iec_m_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x3f:      /* Read all Mode pages */
                if ((0 == subpcode) || (0xff == subpcode)) {
                        len = resp_err_recov_pg(ap, pcontrol, target);
                        len += resp_disconnect_pg(ap + len, pcontrol, target);
                        if (is_disk) {
                                len += resp_format_pg(ap + len, pcontrol,
                                                      target);
                                len += resp_caching_pg(ap + len, pcontrol,
                                                       target);
                        } else if (is_zbc) {
                                len += resp_caching_pg(ap + len, pcontrol,
                                                       target);
                        }
                        len += resp_ctrl_m_pg(ap + len, pcontrol, target);
                        len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
                        if (0xff == subpcode) {
                                len += resp_sas_pcd_m_spg(ap + len, pcontrol,
                                                  target, target_dev_id);
                                len += resp_sas_sha_m_spg(ap + len, pcontrol);
                        }
                        len += resp_iec_m_pg(ap + len, pcontrol, target);
                        offset += len;
                } else {
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
                        return check_condition_result;
                }
                break;
        default:
                bad_pcode = true;
                break;
        }
        if (bad_pcode) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
                return check_condition_result;
        }
        /* MODE DATA LENGTH excludes itself: 1 byte in (6), 2 bytes in (10) */
        if (msense_6)
                arr[0] = offset - 1;
        else
                put_unaligned_be16((offset - 2), arr + 0);
        return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
2464
2465 #define SDEBUG_MAX_MSELECT_SZ 512
2466
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10).  Accepts exactly one of
 * the caching (0x8), control (0xa) or informational-exceptions (0x1c)
 * mode pages and stores its bytes into the corresponding global "current
 * values" array, then raises a MODE PARAMETERS CHANGED unit attention.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
                            struct sdebug_dev_info *devip)
{
        int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
        int param_len, res, mpage;
        unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
        unsigned char *cmd = scp->cmnd;
        int mselect6 = (MODE_SELECT == cmd[0]);

        memset(arr, 0, sizeof(arr));
        pf = cmd[1] & 0x10;     /* page format: must be set */
        sp = cmd[1] & 0x1;      /* save pages: not supported */
        param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
        if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
                return check_condition_result;
        }
        res = fetch_to_dev_buffer(scp, arr, param_len);
        if (-1 == res)
                return DID_ERROR << 16;
        else if (sdebug_verbose && (res < param_len))
                sdev_printk(KERN_INFO, scp->device,
                            "%s: cdb indicated=%d, IO sent=%d bytes\n",
                            __func__, param_len, res);
        md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
        bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
        /* MODE DATA LENGTH is reserved for MODE SELECT, so must be 0 */
        if (md_len > 2) {
                mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
                return check_condition_result;
        }
        off = bd_len + (mselect6 ? 4 : 8);      /* start of the mode page */
        mpage = arr[off] & 0x3f;
        ps = !!(arr[off] & 0x80);       /* PS bit is reserved in MODE SELECT data */
        if (ps) {
                mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
                return check_condition_result;
        }
        spf = !!(arr[off] & 0x40);      /* sub-page format */
        pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
                       (arr[off + 1] + 2);
        if ((pg_len + off) > param_len) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST,
                                PARAMETER_LIST_LENGTH_ERR, 0);
                return check_condition_result;
        }
        /* each case checks the page length byte before accepting the page */
        switch (mpage) {
        case 0x8:      /* Caching Mode page */
                if (caching_pg[1] == arr[off + 1]) {
                        memcpy(caching_pg + 2, arr + off + 2,
                               sizeof(caching_pg) - 2);
                        goto set_mode_changed_ua;
                }
                break;
        case 0xa:      /* Control Mode page */
                if (ctrl_m_pg[1] == arr[off + 1]) {
                        memcpy(ctrl_m_pg + 2, arr + off + 2,
                               sizeof(ctrl_m_pg) - 2);
                        /* byte 4 bit 0x8 toggles write protect; byte 2 bit
                         * 0x4 selects descriptor-format sense data */
                        if (ctrl_m_pg[4] & 0x8)
                                sdebug_wp = true;
                        else
                                sdebug_wp = false;
                        sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
                        goto set_mode_changed_ua;
                }
                break;
        case 0x1c:      /* Informational Exceptions Mode page */
                if (iec_m_pg[1] == arr[off + 1]) {
                        memcpy(iec_m_pg + 2, arr + off + 2,
                               sizeof(iec_m_pg) - 2);
                        goto set_mode_changed_ua;
                }
                break;
        default:
                break;
        }
        mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
        return check_condition_result;
set_mode_changed_ua:
        set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
        return 0;
}
2548
static int resp_temp_l_pg(unsigned char *arr)
{
        /* Temperature log page (0xd) payload: two fixed parameters */
        static const unsigned char temp_l_pg[] = {
                0x0, 0x0, 0x3, 0x2, 0x0, 38,    /* param 0x0: temperature */
                0x0, 0x1, 0x3, 0x2, 0x0, 65,    /* param 0x1: reference temperature */
        };

        memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
        return sizeof(temp_l_pg);
}
2558
static int resp_ie_l_pg(unsigned char *arr)
{
        /* Informational Exceptions log page (0x2f) payload */
        unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
                };

        memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
        if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
                /* simulate a pending exception; 0xff presumably a dummy
                 * temperature value — confirm against SPC */
                arr[4] = THRESHOLD_EXCEEDED;
                arr[5] = 0xff;
        }
        return sizeof(ie_l_pg);
}
2571
2572 #define SDEBUG_MAX_LSENSE_SZ 512
2573
/*
 * Respond to LOG SENSE.  Supports the supported-pages (0x0), temperature
 * (0xd) and informational exceptions (0x2f) log pages, each with either
 * no subpage (0) or the "all subpages" (0xff) subpage code.
 */
static int resp_log_sense(struct scsi_cmnd *scp,
                          struct sdebug_dev_info *devip)
{
        int ppc, sp, pcode, subpcode, alloc_len, len, n;
        unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
        unsigned char *cmd = scp->cmnd;

        memset(arr, 0, sizeof(arr));
        ppc = cmd[1] & 0x2;     /* parameter pointer control: unsupported */
        sp = cmd[1] & 0x1;      /* save parameters: unsupported */
        if (ppc || sp) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
                return check_condition_result;
        }
        pcode = cmd[2] & 0x3f;
        subpcode = cmd[3] & 0xff;
        alloc_len = get_unaligned_be16(cmd + 7);
        arr[0] = pcode;
        if (0 == subpcode) {
                switch (pcode) {
                case 0x0:       /* Supported log pages log page */
                        n = 4;
                        arr[n++] = 0x0;         /* this page */
                        arr[n++] = 0xd;         /* Temperature */
                        arr[n++] = 0x2f;        /* Informational exceptions */
                        arr[3] = n - 4;         /* page length */
                        break;
                case 0xd:       /* Temperature log page */
                        arr[3] = resp_temp_l_pg(arr + 4);
                        break;
                case 0x2f:      /* Informational exceptions log page */
                        arr[3] = resp_ie_l_pg(arr + 4);
                        break;
                default:
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
                        return check_condition_result;
                }
        } else if (0xff == subpcode) {
                arr[0] |= 0x40;         /* SPF bit: subpage format */
                arr[1] = subpcode;
                switch (pcode) {
                case 0x0:       /* Supported log pages and subpages log page */
                        n = 4;
                        arr[n++] = 0x0;
                        arr[n++] = 0x0;         /* 0,0 page */
                        arr[n++] = 0x0;
                        arr[n++] = 0xff;        /* this page */
                        arr[n++] = 0xd;
                        arr[n++] = 0x0;         /* Temperature */
                        arr[n++] = 0x2f;
                        arr[n++] = 0x0; /* Informational exceptions */
                        arr[3] = n - 4;
                        break;
                case 0xd:       /* Temperature subpages */
                        n = 4;
                        arr[n++] = 0xd;
                        arr[n++] = 0x0;         /* Temperature */
                        arr[3] = n - 4;
                        break;
                case 0x2f:      /* Informational exceptions subpages */
                        n = 4;
                        arr[n++] = 0x2f;
                        arr[n++] = 0x0;         /* Informational exceptions */
                        arr[3] = n - 4;
                        break;
                default:
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
                        return check_condition_result;
                }
        } else {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
                return check_condition_result;
        }
        /* NOTE(review): arr is SDEBUG_MAX_LSENSE_SZ bytes but the clamp
         * below uses SDEBUG_MAX_INQ_ARR_SZ; harmless for these small pages
         * but looks like a copy/paste leftover — confirm intended bound. */
        len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
        return fill_from_dev_buffer(scp, arr,
                    min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
}
2651
/* True if the device was set up with zones (ZBC emulation active). */
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
        return devip->nr_zones != 0;
}
2656
/* Map an LBA to the state of the zone containing it (zones are
 * power-of-two sized: zsize_shift is log2 of the zone size in blocks). */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
                                        unsigned long long lba)
{
        return &devip->zstate[lba >> devip->zsize_shift];
}
2662
/* True for conventional (non write-pointer) zones. */
static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
        return zsp->z_type == ZBC_ZONE_TYPE_CNV;
}
2667
2668 static void zbc_close_zone(struct sdebug_dev_info *devip,
2669                            struct sdeb_zone_state *zsp)
2670 {
2671         enum sdebug_z_cond zc;
2672
2673         if (zbc_zone_is_conv(zsp))
2674                 return;
2675
2676         zc = zsp->z_cond;
2677         if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2678                 return;
2679
2680         if (zc == ZC2_IMPLICIT_OPEN)
2681                 devip->nr_imp_open--;
2682         else
2683                 devip->nr_exp_open--;
2684
2685         if (zsp->z_wp == zsp->z_start) {
2686                 zsp->z_cond = ZC1_EMPTY;
2687         } else {
2688                 zsp->z_cond = ZC4_CLOSED;
2689                 devip->nr_closed++;
2690         }
2691 }
2692
2693 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2694 {
2695         struct sdeb_zone_state *zsp = &devip->zstate[0];
2696         unsigned int i;
2697
2698         for (i = 0; i < devip->nr_zones; i++, zsp++) {
2699                 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2700                         zbc_close_zone(devip, zsp);
2701                         return;
2702                 }
2703         }
2704 }
2705
/*
 * Transition a zone to implicitly or explicitly open, updating the
 * device-wide open/closed zone counters.  If the open-zone limit would
 * be exceeded, an implicitly open zone is closed first.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
                          struct sdeb_zone_state *zsp, bool explicit)
{
        enum sdebug_z_cond zc;

        if (zbc_zone_is_conv(zsp))
                return;         /* conventional zones cannot be opened */

        zc = zsp->z_cond;
        if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
            (!explicit && zc == ZC2_IMPLICIT_OPEN))
                return;         /* already in the requested condition */

        /* Close an implicit open zone if necessary */
        if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
                zbc_close_zone(devip, zsp);
        else if (devip->max_open &&
                 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
                zbc_close_imp_open_zone(devip);

        if (zsp->z_cond == ZC4_CLOSED)
                devip->nr_closed--;
        if (explicit) {
                zsp->z_cond = ZC3_EXPLICIT_OPEN;
                devip->nr_exp_open++;
        } else {
                zsp->z_cond = ZC2_IMPLICIT_OPEN;
                devip->nr_imp_open++;
        }
}
2736
/*
 * Advance the write pointer(s) for a write of num blocks starting at lba,
 * marking any zone whose WP reaches the zone end as FULL.  Conventional
 * zones are untouched (no write pointer).
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
                       unsigned long long lba, unsigned int num)
{
        struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
        unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

        if (zbc_zone_is_conv(zsp))
                return;

        if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
                /* sequential-write-required: the write starts at the WP
                 * (validated by the caller), so just advance it */
                zsp->z_wp += num;
                if (zsp->z_wp >= zend)
                        zsp->z_cond = ZC5_FULL;
                return;
        }

        /* non-SWR zones: the write may land anywhere and span zones */
        while (num) {
                if (lba != zsp->z_wp)
                        zsp->z_non_seq_resource = true;

                end = lba + num;
                if (end >= zend) {
                        /* write reaches (or passes) the end of this zone */
                        n = zend - lba;
                        zsp->z_wp = zend;
                } else if (end > zsp->z_wp) {
                        /* write extends beyond the current WP */
                        n = num;
                        zsp->z_wp = end;
                } else {
                        /* write lies entirely below the WP: WP unchanged */
                        n = num;
                }
                if (zsp->z_wp >= zend)
                        zsp->z_cond = ZC5_FULL;

                num -= n;
                lba += n;
                if (num) {
                        /* remainder continues into the next zone */
                        zsp++;
                        zend = zsp->z_start + zsp->z_size;
                }
        }
}
2778
/*
 * ZBC-specific validity checks for a read or write of @num blocks at
 * @lba. Returns 0 if the access is permitted, otherwise sets sense data
 * on @scp and returns check_condition_result. May implicitly open the
 * target zone for a permitted write.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* Host-aware devices place no restrictions on reads */
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp_end != zsp &&
		    zbc_zone_is_conv(zsp) &&
		    !zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ... but a write may not spill into a sequential zone */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		/* Respect the maximum number of explicitly open zones */
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2850
2851 static inline int check_device_access_params
2852                         (struct scsi_cmnd *scp, unsigned long long lba,
2853                          unsigned int num, bool write)
2854 {
2855         struct scsi_device *sdp = scp->device;
2856         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2857
2858         if (lba + num > sdebug_capacity) {
2859                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2860                 return check_condition_result;
2861         }
2862         /* transfer length excessive (tie in to block limits VPD page) */
2863         if (num > sdebug_store_sectors) {
2864                 /* needs work to find which cdb byte 'num' comes from */
2865                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2866                 return check_condition_result;
2867         }
2868         if (write && unlikely(sdebug_wp)) {
2869                 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2870                 return check_condition_result;
2871         }
2872         if (sdebug_dev_is_zoned(devip))
2873                 return check_zbc_access_params(scp, lba, num, write);
2874
2875         return 0;
2876 }
2877
2878 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip)
2879 {
2880         return sdebug_fake_rw ?
2881                         NULL : xa_load(per_store_ap, devip->sdbg_host->si_idx);
2882 }
2883
2884 /* Returns number of bytes copied or -1 if error. */
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		/* noted so a later SYNCHRONIZE CACHE knows data changed */
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* No data to move, or fake_rw mode (sip == NULL): succeed, copy 0 */
	if (!sdb->length || !sip)
		return 0;
	/* cdb direction must agree with the request's data direction */
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/*
	 * Reduce lba modulo the store size; 'rest' is the portion of the
	 * transfer that wraps past the end of the (smaller) store.
	 */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* Copy the wrapped portion from/to the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2926
2927 /* Returns number of bytes copied or -1 if error. */
2928 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2929 {
2930         struct scsi_data_buffer *sdb = &scp->sdb;
2931
2932         if (!sdb->length)
2933                 return 0;
2934         if (scp->sc_data_direction != DMA_TO_DEVICE)
2935                 return -1;
2936         return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2937                               num * sdebug_sector_size, 0, true);
2938 }
2939
2940 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2941  * arr into sip->storep+lba and return true. If comparison fails then
2942  * return false. */
2943 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2944                               const u8 *arr, bool compare_only)
2945 {
2946         bool res;
2947         u64 block, rest = 0;
2948         u32 store_blks = sdebug_store_sectors;
2949         u32 lb_size = sdebug_sector_size;
2950         u8 *fsp = sip->storep;
2951
2952         block = do_div(lba, store_blks);
2953         if (block + num > store_blks)
2954                 rest = block + num - store_blks;
2955
2956         res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2957         if (!res)
2958                 return res;
2959         if (rest)
2960                 res = memcmp(fsp, arr + ((num - rest) * lb_size),
2961                              rest * lb_size);
2962         if (!res)
2963                 return res;
2964         if (compare_only)
2965                 return true;
2966         arr += num * lb_size;
2967         memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2968         if (rest)
2969                 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2970         return res;
2971 }
2972
2973 static __be16 dif_compute_csum(const void *buf, int len)
2974 {
2975         __be16 csum;
2976
2977         if (sdebug_guard)
2978                 csum = (__force __be16)ip_compute_csum(buf, len);
2979         else
2980                 csum = cpu_to_be16(crc_t10dif(buf, len));
2981
2982         return csum;
2983 }
2984
2985 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2986                       sector_t sector, u32 ei_lba)
2987 {
2988         __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2989
2990         if (sdt->guard_tag != csum) {
2991                 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2992                         (unsigned long)sector,
2993                         be16_to_cpu(sdt->guard_tag),
2994                         be16_to_cpu(csum));
2995                 return 0x01;
2996         }
2997         if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2998             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2999                 pr_err("REF check failed on sector %lu\n",
3000                         (unsigned long)sector);
3001                 return 0x03;
3002         }
3003         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3004             be32_to_cpu(sdt->ref_tag) != ei_lba) {
3005                 pr_err("REF check failed on sector %lu\n",
3006                         (unsigned long)sector);
3007                 return 0x03;
3008         }
3009         return 0;
3010 }
3011
/*
 * Copy protection (PI) tuples between the command's protection sg list
 * and the store's dif_storep array: store -> sgl for reads, sgl -> store
 * for writes. Handles the transfer wrapping past the end of the store.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* 'rest' is the part that wraps past the end of the store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* Wrapped part continues at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3057
3058 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3059                             unsigned int sectors, u32 ei_lba)
3060 {
3061         unsigned int i;
3062         sector_t sector;
3063         struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3064                                                 scp->device->hostdata);
3065         struct t10_pi_tuple *sdt;
3066
3067         for (i = 0; i < sectors; i++, ei_lba++) {
3068                 int ret;
3069
3070                 sector = start_sec + i;
3071                 sdt = dif_store(sip, sector);
3072
3073                 if (sdt->app_tag == cpu_to_be16(0xffff))
3074                         continue;
3075
3076                 ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3077                                  ei_lba);
3078                 if (ret) {
3079                         dif_errors++;
3080                         return ret;
3081                 }
3082         }
3083
3084         dif_copy_prot(scp, start_sec, sectors, true);
3085         dix_reads++;
3086
3087         return 0;
3088 }
3089
/*
 * Service READ(6/10/12/16/32) and the read half of XDWRITEREAD(10):
 * decode the cdb, validate the access, optionally verify DIX/DIF
 * protection data, then copy data from the backing store to the
 * command's sg list. Also implements the simulated medium error range
 * and the SDEBUG_OPT_* error injections.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fake_rw devices have no store; fall back to the global lock */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;

	/* Decode lba/num (and the expected initial ref tag) per opcode */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): transfer length 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* Validate the RDPROTECT field against the formatted DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			/* inj_short: simulate a short (half-length) read */
			if (sqcp->inj_short)
				num /= 2;
		}
	} else
		sqcp = NULL;

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Simulated unrecoverable read error if the range is touched */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Injected completion errors (recovered/transport/dif/dix) */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
3222
/*
 * Hex/ASCII dump of @len bytes of @buf to the kernel log, 16 bytes per
 * output line. Printable characters are shown as " c ", all others as
 * their two-digit hex value.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		/*
		 * Bound the inner loop by len as well: the original read a
		 * full 16 bytes per line, overrunning the buffer whenever
		 * len was not a multiple of 16.
		 */
		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i + j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", c);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
3244
/*
 * Verify the PI tuples supplied with a DIX write against the outgoing
 * data, walking the protection and data sg lists in lock step. On
 * success the protection data is copied into the store's dif_storep.
 * Returns 0 on success or a dif_verify()-style error code.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		/* Data sg list exhausted before protection sg list: error */
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			/* Check this sector's guard/app/ref tags */
			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* All sectors verified: persist the protection data */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3316
/*
 * Convert an LBA to its index in the provisioning (LBP) bit map,
 * accounting for the configured unmap alignment offset. Note that
 * sector_div() divides its first argument in place.
 */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}
3324
/*
 * Inverse of lba_to_map_index(): first LBA covered by the given
 * provisioning map entry.
 */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
3333
3334 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3335                               unsigned int *num)
3336 {
3337         sector_t end;
3338         unsigned int mapped;
3339         unsigned long index;
3340         unsigned long next;
3341
3342         index = lba_to_map_index(lba);
3343         mapped = test_bit(index, sip->map_storep);
3344
3345         if (mapped)
3346                 next = find_next_zero_bit(sip->map_storep, map_size, index);
3347         else
3348                 next = find_next_bit(sip->map_storep, map_size, index);
3349
3350         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3351         *num = end - lba;
3352         return mapped;
3353 }
3354
3355 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3356                        unsigned int len)
3357 {
3358         sector_t end = lba + len;
3359
3360         while (lba < end) {
3361                 unsigned long index = lba_to_map_index(lba);
3362
3363                 if (index < map_size)
3364                         set_bit(index, sip->map_storep);
3365
3366                 lba = map_index_to_lba(index + 1);
3367         }
3368 }
3369
/*
 * Clear the provisioning bits for [lba, lba + len). Only granules that
 * lie entirely within the range are unmapped. Unmapped data is
 * overwritten per sdebug_lbprz (zeroes for LBPRZ=1, 0xff for LBPRZ=2)
 * and any protection data is invalidated.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* Skip granules only partially covered by the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				/* 0xff app tag => "don't check" on read */
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3398
/*
 * Service WRITE(6/10/12/16/32) and the write half of XDWRITEREAD(10):
 * decode the cdb, validate the access under the store's write lock,
 * optionally verify DIX/DIF protection data, copy data from the
 * command's sg list into the backing store, and update provisioning and
 * ZBC write-pointer state. Also implements the SDEBUG_OPT_* injections.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fake_rw devices have no store; fall back to the global lock */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	/* Decode lba/num (and the expected initial ref tag) per opcode */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* WRITE(6): transfer length 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* Validate the WRPROTECT field against the formatted DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	write_lock(macc_lckp);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock(macc_lckp);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	/* Thin provisioning: mark the written range as provisioned */
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	write_unlock(macc_lckp);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* Injected completion errors (recovered/dif/dix) */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3516
3517 /*
3518  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3519  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3520  */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* holds LB data offset header + LBA range descriptors */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fall back to the global fake-rw lock when there is no backing store */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	/* Pick field offsets per cdb variant: 32 byte (variable length) or 16 */
	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;	/* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	/* The descriptor list occupies the first lbdof logical blocks of dout */
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	sg_off = lbdof_blen;	/* data for first descriptor starts after the list */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* Walk the LBA range descriptors; first lrd_size bytes are the header */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		/* descriptor byte 12 holds the expected initial LBA for PI */
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		/* running total of LBs must not exceed the cdb's Buffer xfer Length */
		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* optional error injection (recovered / DIF / DIX) per queued cmd */
		if (unlikely(sdebug_any_injecting_opt)) {
			struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

			if (sqcp) {
				if (sqcp->inj_recovered) {
					mk_sense_buffer(scp, RECOVERED_ERROR,
							THRESHOLD_EXCEEDED, 0);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dif) {
					/* Logical block guard check failed */
					mk_sense_buffer(scp, ABORTED_COMMAND,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dix) {
					mk_sense_buffer(scp, ILLEGAL_REQUEST,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				}
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
3687
3688 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3689                            u32 ei_lba, bool unmap, bool ndob)
3690 {
3691         struct scsi_device *sdp = scp->device;
3692         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3693         unsigned long long i;
3694         u64 block, lbaa;
3695         u32 lb_size = sdebug_sector_size;
3696         int ret;
3697         struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3698                                                 scp->device->hostdata);
3699         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3700         u8 *fs1p;
3701         u8 *fsp;
3702
3703         write_lock(macc_lckp);
3704
3705         ret = check_device_access_params(scp, lba, num, true);
3706         if (ret) {
3707                 write_unlock(macc_lckp);
3708                 return ret;
3709         }
3710
3711         if (unmap && scsi_debug_lbp()) {
3712                 unmap_region(sip, lba, num);
3713                 goto out;
3714         }
3715         lbaa = lba;
3716         block = do_div(lbaa, sdebug_store_sectors);
3717         /* if ndob then zero 1 logical block, else fetch 1 logical block */
3718         fsp = sip->storep;
3719         fs1p = fsp + (block * lb_size);
3720         if (ndob) {
3721                 memset(fs1p, 0, lb_size);
3722                 ret = 0;
3723         } else
3724                 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3725
3726         if (-1 == ret) {
3727                 write_unlock(&sip->macc_lck);
3728                 return DID_ERROR << 16;
3729         } else if (sdebug_verbose && !ndob && (ret < lb_size))
3730                 sdev_printk(KERN_INFO, scp->device,
3731                             "%s: %s: lb size=%u, IO sent=%d bytes\n",
3732                             my_name, "write same", lb_size, ret);
3733
3734         /* Copy first sector to remaining blocks */
3735         for (i = 1 ; i < num ; i++) {
3736                 lbaa = lba + i;
3737                 block = do_div(lbaa, sdebug_store_sectors);
3738                 memmove(fsp + (block * lb_size), fs1p, lb_size);
3739         }
3740         if (scsi_debug_lbp())
3741                 map_region(sip, lba, num);
3742         /* If ZBC zone then bump its write pointer */
3743         if (sdebug_dev_is_zoned(devip))
3744                 zbc_inc_wp(devip, lba, num);
3745 out:
3746         write_unlock(macc_lckp);
3747
3748         return 0;
3749 }
3750
3751 static int resp_write_same_10(struct scsi_cmnd *scp,
3752                               struct sdebug_dev_info *devip)
3753 {
3754         u8 *cmd = scp->cmnd;
3755         u32 lba;
3756         u16 num;
3757         u32 ei_lba = 0;
3758         bool unmap = false;
3759
3760         if (cmd[1] & 0x8) {
3761                 if (sdebug_lbpws10 == 0) {
3762                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3763                         return check_condition_result;
3764                 } else
3765                         unmap = true;
3766         }
3767         lba = get_unaligned_be32(cmd + 2);
3768         num = get_unaligned_be16(cmd + 7);
3769         if (num > sdebug_write_same_length) {
3770                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3771                 return check_condition_result;
3772         }
3773         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3774 }
3775
3776 static int resp_write_same_16(struct scsi_cmnd *scp,
3777                               struct sdebug_dev_info *devip)
3778 {
3779         u8 *cmd = scp->cmnd;
3780         u64 lba;
3781         u32 num;
3782         u32 ei_lba = 0;
3783         bool unmap = false;
3784         bool ndob = false;
3785
3786         if (cmd[1] & 0x8) {     /* UNMAP */
3787                 if (sdebug_lbpws == 0) {
3788                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3789                         return check_condition_result;
3790                 } else
3791                         unmap = true;
3792         }
3793         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3794                 ndob = true;
3795         lba = get_unaligned_be64(cmd + 2);
3796         num = get_unaligned_be32(cmd + 10);
3797         if (num > sdebug_write_same_length) {
3798                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3799                 return check_condition_result;
3800         }
3801         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3802 }
3803
3804 /* Note the mode field is in the same position as the (lower) service action
3805  * field. For the Report supported operation codes command, SPC-4 suggests
3806  * each mode of this command should be reported separately; for future. */
3807 static int resp_write_buffer(struct scsi_cmnd *scp,
3808                              struct sdebug_dev_info *devip)
3809 {
3810         u8 *cmd = scp->cmnd;
3811         struct scsi_device *sdp = scp->device;
3812         struct sdebug_dev_info *dp;
3813         u8 mode;
3814
3815         mode = cmd[1] & 0x1f;
3816         switch (mode) {
3817         case 0x4:       /* download microcode (MC) and activate (ACT) */
3818                 /* set UAs on this device only */
3819                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3820                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3821                 break;
3822         case 0x5:       /* download MC, save and ACT */
3823                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3824                 break;
3825         case 0x6:       /* download MC with offsets and ACT */
3826                 /* set UAs on most devices (LUs) in this target */
3827                 list_for_each_entry(dp,
3828                                     &devip->sdbg_host->dev_info_list,
3829                                     dev_list)
3830                         if (dp->target == sdp->id) {
3831                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3832                                 if (devip != dp)
3833                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3834                                                 dp->uas_bm);
3835                         }
3836                 break;
3837         case 0x7:       /* download MC with offsets, save, and ACT */
3838                 /* set UA on all devices (LUs) in this target */
3839                 list_for_each_entry(dp,
3840                                     &devip->sdbg_host->dev_info_list,
3841                                     dev_list)
3842                         if (dp->target == sdp->id)
3843                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3844                                         dp->uas_bm);
3845                 break;
3846         default:
3847                 /* do nothing for this command for other mode values */
3848                 break;
3849         }
3850         return 0;
3851 }
3852
/*
 * COMPARE AND WRITE (SBC). The data-out buffer carries 2*num blocks: the
 * first half is compared against the store, and only if all blocks match
 * is the second half written. MISCOMPARE sense is returned otherwise.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;	/* temp buffer for both halves of the data-out */
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fall back to the global fake-rw lock when there is no backing store */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* data-out holds compare data followed by write data: 2*num blocks */
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	/* hold write lock across compare + conditional write (atomicity) */
	write_lock(macc_lckp);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* comp_write_worker() compares and, on match, performs the write */
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	write_unlock(macc_lckp);
	kfree(arr);
	return retval;
}
3914
/* One UNMAP block descriptor as laid out in the UNMAP parameter data */
struct unmap_block_desc {
	__be64	lba;		/* first LBA of the range to unmap */
	__be32	blocks;		/* number of logical blocks in the range */
	__be32	__reserved;
};
3920
3921 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3922 {
3923         unsigned char *buf;
3924         struct unmap_block_desc *desc;
3925         struct sdeb_store_info *sip = devip2sip(devip);
3926         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3927         unsigned int i, payload_len, descriptors;
3928         int ret;
3929
3930         if (!scsi_debug_lbp())
3931                 return 0;       /* fib and say its done */
3932         payload_len = get_unaligned_be16(scp->cmnd + 7);
3933         BUG_ON(scsi_bufflen(scp) != payload_len);
3934
3935         descriptors = (payload_len - 8) / 16;
3936         if (descriptors > sdebug_unmap_max_desc) {
3937                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3938                 return check_condition_result;
3939         }
3940
3941         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3942         if (!buf) {
3943                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3944                                 INSUFF_RES_ASCQ);
3945                 return check_condition_result;
3946         }
3947
3948         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3949
3950         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3951         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3952
3953         desc = (void *)&buf[8];
3954
3955         write_lock(macc_lckp);
3956
3957         for (i = 0 ; i < descriptors ; i++) {
3958                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3959                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3960
3961                 ret = check_device_access_params(scp, lba, num, true);
3962                 if (ret)
3963                         goto out;
3964
3965                 unmap_region(sip, lba, num);
3966         }
3967
3968         ret = 0;
3969
3970 out:
3971         write_unlock(macc_lckp);
3972         kfree(buf);
3973
3974         return ret;
3975 }
3976
3977 #define SDEBUG_GET_LBA_STATUS_LEN 32
3978
3979 static int resp_get_lba_status(struct scsi_cmnd *scp,
3980                                struct sdebug_dev_info *devip)
3981 {
3982         u8 *cmd = scp->cmnd;
3983         struct sdeb_store_info *sip = devip2sip(devip);
3984         u64 lba;
3985         u32 alloc_len, mapped, num;
3986         int ret;
3987         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3988
3989         lba = get_unaligned_be64(cmd + 2);
3990         alloc_len = get_unaligned_be32(cmd + 10);
3991
3992         if (alloc_len < 24)
3993                 return 0;
3994
3995         ret = check_device_access_params(scp, lba, 1, false);
3996         if (ret)
3997                 return ret;
3998
3999         if (scsi_debug_lbp())
4000                 mapped = map_state(sip, lba, &num);
4001         else {
4002                 mapped = 1;
4003                 /* following just in case virtual_gb changed */
4004                 sdebug_capacity = get_sdebug_capacity();
4005                 if (sdebug_capacity - lba <= 0xffffffff)
4006                         num = sdebug_capacity - lba;
4007                 else
4008                         num = 0xffffffff;
4009         }
4010
4011         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4012         put_unaligned_be32(20, arr);            /* Parameter Data Length */
4013         put_unaligned_be64(lba, arr + 8);       /* LBA */
4014         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
4015         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
4016
4017         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4018 }
4019
4020 static int resp_sync_cache(struct scsi_cmnd *scp,
4021                            struct sdebug_dev_info *devip)
4022 {
4023         int res = 0;
4024         u64 lba;
4025         u32 num_blocks;
4026         u8 *cmd = scp->cmnd;
4027
4028         if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
4029                 lba = get_unaligned_be32(cmd + 2);
4030                 num_blocks = get_unaligned_be16(cmd + 7);
4031         } else {                                /* SYNCHRONIZE_CACHE(16) */
4032                 lba = get_unaligned_be64(cmd + 2);
4033                 num_blocks = get_unaligned_be32(cmd + 10);
4034         }
4035         if (lba + num_blocks > sdebug_capacity) {
4036                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4037                 return check_condition_result;
4038         }
4039         if (!write_since_sync || cmd[1] & 0x2)
4040                 res = SDEG_RES_IMMED_MASK;
4041         else            /* delay if write_since_sync and IMMED clear */
4042                 write_since_sync = false;
4043         return res;
4044 }
4045
4046 /*
4047  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4048  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4049  * a GOOD status otherwise. Model a disk with a big cache and yield
4050  * CONDITION MET. Actually tries to bring range in main memory into the
4051  * cache associated with the CPU(s).
4052  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;	/* rest: LBs that wrap past the store's end */
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *fsp = sip ? sip->storep : NULL;	/* NULL when no backing store */

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;	/* nothing to warm when there is no store */
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div(): lba becomes the quotient; block gets lba % store_sectors */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	read_lock(macc_lckp);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)	/* wrapped portion starts at the store's beginning */
		prefetch_range(fsp, rest * sdebug_sector_size);
	read_unlock(macc_lckp);
fini:
	if (cmd[1] & 0x2)	/* IMMED bit */
		res = SDEG_RES_IMMED_MASK;
	/* model a big cache: the range always "fits", hence CONDITION MET */
	return res | condition_met_result;
}
4095
4096 #define RL_BUCKET_ELEMS 8
4097
4098 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4099  * (W-LUN), the normal Linux scanning logic does not associate it with a
4100  * device (e.g. /dev/sg7). The following magic will make that association:
4101  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4102  * where <n> is a host number. If there are multiple targets in a host then
4103  * the above will associate a W-LUN to each target. To only get a W-LUN
4104  * for target 2, then use "echo '- 2 49409' > scan" .
4105  */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	/* response built one bucket (RL_BUCKET_ELEMS entries) at a time */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;	/* byte offset into response so far */
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* SELECT REPORT decides which LUN classes appear in the response */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	/* k counts buckets; j counts entries used in the current bucket */
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket starts with the 8 byte header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		/* bucket not filled: all normal LUNs emitted, exit loop */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* after the loop, lun_p points at the next free slot in arr */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)	/* flush the final, partially filled bucket */
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4196
4197 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4198 {
4199         bool is_bytchk3 = false;
4200         u8 bytchk;
4201         int ret, j;
4202         u32 vnum, a_num, off;
4203         const u32 lb_size = sdebug_sector_size;
4204         u64 lba;
4205         u8 *arr;
4206         u8 *cmd = scp->cmnd;
4207         struct sdeb_store_info *sip = devip2sip(devip);
4208         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4209
4210         bytchk = (cmd[1] >> 1) & 0x3;
4211         if (bytchk == 0) {
4212                 return 0;       /* always claim internal verify okay */
4213         } else if (bytchk == 2) {
4214                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4215                 return check_condition_result;
4216         } else if (bytchk == 3) {
4217                 is_bytchk3 = true;      /* 1 block sent, compared repeatedly */
4218         }
4219         switch (cmd[0]) {
4220         case VERIFY_16:
4221                 lba = get_unaligned_be64(cmd + 2);
4222                 vnum = get_unaligned_be32(cmd + 10);
4223                 break;
4224         case VERIFY:            /* is VERIFY(10) */
4225                 lba = get_unaligned_be32(cmd + 2);
4226                 vnum = get_unaligned_be16(cmd + 7);
4227                 break;
4228         default:
4229                 mk_sense_invalid_opcode(scp);
4230                 return check_condition_result;
4231         }
4232         a_num = is_bytchk3 ? 1 : vnum;
4233         /* Treat following check like one for read (i.e. no write) access */
4234         ret = check_device_access_params(scp, lba, a_num, false);
4235         if (ret)
4236                 return ret;
4237
4238         arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4239         if (!arr) {
4240                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4241                                 INSUFF_RES_ASCQ);
4242                 return check_condition_result;
4243         }
4244         /* Not changing store, so only need read access */
4245         read_lock(macc_lckp);
4246
4247         ret = do_dout_fetch(scp, a_num, arr);
4248         if (ret == -1) {
4249                 ret = DID_ERROR << 16;
4250                 goto cleanup;
4251         } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4252                 sdev_printk(KERN_INFO, scp->device,
4253                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4254                             my_name, __func__, a_num * lb_size, ret);
4255         }
4256         if (is_bytchk3) {
4257                 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4258                         memcpy(arr + off, arr, lb_size);
4259         }
4260         ret = 0;
4261         if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4262                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4263                 ret = check_condition_result;
4264                 goto cleanup;
4265         }
4266 cleanup:
4267         read_unlock(macc_lckp);
4268         kfree(arr);
4269         return ret;
4270 }
4271
4272 #define RZONES_DESC_HD 64
4273
4274 /* Report zones depending on start LBA nad reporting options */
4275 static int resp_report_zones(struct scsi_cmnd *scp,
4276                              struct sdebug_dev_info *devip)
4277 {
4278         unsigned int i, max_zones, rep_max_zones, nrz = 0;
4279         int ret = 0;
4280         u32 alloc_len, rep_opts, rep_len;
4281         bool partial;
4282         u64 lba, zs_lba;
4283         u8 *arr = NULL, *desc;
4284         u8 *cmd = scp->cmnd;
4285         struct sdeb_zone_state *zsp;
4286         struct sdeb_store_info *sip = devip2sip(devip);
4287         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4288
4289         if (!sdebug_dev_is_zoned(devip)) {
4290                 mk_sense_invalid_opcode(scp);
4291                 return check_condition_result;
4292         }
4293         zs_lba = get_unaligned_be64(cmd + 2);
4294         alloc_len = get_unaligned_be32(cmd + 10);
4295         rep_opts = cmd[14] & 0x3f;
4296         partial = cmd[14] & 0x80;
4297
4298         if (zs_lba >= sdebug_capacity) {
4299                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4300                 return check_condition_result;
4301         }
4302
4303         max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4304         rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4305                             max_zones);
4306
4307         arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4308         if (!arr) {
4309                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4310                                 INSUFF_RES_ASCQ);
4311                 return check_condition_result;
4312         }
4313
4314         read_lock(macc_lckp);
4315
4316         desc = arr + 64;
4317         for (i = 0; i < max_zones; i++) {
4318                 lba = zs_lba + devip->zsize * i;
4319                 if (lba > sdebug_capacity)
4320                         break;
4321                 zsp = zbc_zone(devip, lba);
4322                 switch (rep_opts) {
4323                 case 0x00:
4324                         /* All zones */
4325                         break;
4326                 case 0x01:
4327                         /* Empty zones */
4328                         if (zsp->z_cond != ZC1_EMPTY)
4329                                 continue;
4330                         break;
4331                 case 0x02:
4332                         /* Implicit open zones */
4333                         if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4334                                 continue;
4335                         break;
4336                 case 0x03:
4337                         /* Explicit open zones */
4338                         if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4339                                 continue;
4340                         break;
4341                 case 0x04:
4342                         /* Closed zones */
4343                         if (zsp->z_cond != ZC4_CLOSED)
4344                                 continue;
4345                         break;
4346                 case 0x05:
4347                         /* Full zones */
4348                         if (zsp->z_cond != ZC5_FULL)
4349                                 continue;
4350                         break;
4351                 case 0x06:
4352                 case 0x07:
4353                 case 0x10:
4354                         /*
4355                          * Read-only, offline, reset WP recommended are
4356                          * not emulated: no zones to report;
4357                          */
4358                         continue;
4359                 case 0x11:
4360                         /* non-seq-resource set */
4361                         if (!zsp->z_non_seq_resource)
4362                                 continue;
4363                         break;
4364                 case 0x3f:
4365                         /* Not write pointer (conventional) zones */
4366                         if (!zbc_zone_is_conv(zsp))
4367                                 continue;
4368                         break;
4369                 default:
4370                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
4371                                         INVALID_FIELD_IN_CDB, 0);
4372                         ret = check_condition_result;
4373                         goto fini;
4374                 }
4375
4376                 if (nrz < rep_max_zones) {
4377                         /* Fill zone descriptor */
4378                         desc[0] = zsp->z_type;
4379                         desc[1] = zsp->z_cond << 4;
4380                         if (zsp->z_non_seq_resource)
4381                                 desc[1] |= 1 << 1;
4382                         put_unaligned_be64((u64)zsp->z_size, desc + 8);
4383                         put_unaligned_be64((u64)zsp->z_start, desc + 16);
4384                         put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4385                         desc += 64;
4386                 }
4387
4388                 if (partial && nrz >= rep_max_zones)
4389                         break;
4390
4391                 nrz++;
4392         }
4393
4394         /* Report header */
4395         put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4396         put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4397
4398         rep_len = (unsigned long)desc - (unsigned long)arr;
4399         ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4400
4401 fini:
4402         read_unlock(macc_lckp);
4403         kfree(arr);
4404         return ret;
4405 }
4406
4407 /* Logic transplanted from tcmu-runner, file_zbc.c */
4408 static void zbc_open_all(struct sdebug_dev_info *devip)
4409 {
4410         struct sdeb_zone_state *zsp = &devip->zstate[0];
4411         unsigned int i;
4412
4413         for (i = 0; i < devip->nr_zones; i++, zsp++) {
4414                 if (zsp->z_cond == ZC4_CLOSED)
4415                         zbc_open_zone(devip, &devip->zstate[i], true);
4416         }
4417 }
4418
/* Open the zone specified by the OPEN ZONE command, or all closed zones */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;	/* ALL bit: act on every zone */
	struct sdeb_store_info *sip = devip2sip(devip);
	/* use the fake-store lock when no backing store is attached */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		/* zone commands are invalid opcodes on non-ZBC devices */
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		/* Check if all closed zones can be opened */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones have no write pointer and cannot be opened */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	/* opening an already explicitly-open or full zone is a no-op */
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	/* an implicitly open zone is closed first, then opened explicitly */
	if (zc == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	zbc_open_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
4489
4490 static void zbc_close_all(struct sdebug_dev_info *devip)
4491 {
4492         unsigned int i;
4493
4494         for (i = 0; i < devip->nr_zones; i++)
4495                 zbc_close_zone(devip, &devip->zstate[i]);
4496 }
4497
4498 static int resp_close_zone(struct scsi_cmnd *scp,
4499                            struct sdebug_dev_info *devip)
4500 {
4501         int res = 0;
4502         u64 z_id;
4503         u8 *cmd = scp->cmnd;
4504         struct sdeb_zone_state *zsp;
4505         bool all = cmd[14] & 0x01;
4506         struct sdeb_store_info *sip = devip2sip(devip);
4507         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4508
4509         if (!sdebug_dev_is_zoned(devip)) {
4510                 mk_sense_invalid_opcode(scp);
4511                 return check_condition_result;
4512         }
4513
4514         write_lock(macc_lckp);
4515
4516         if (all) {
4517                 zbc_close_all(devip);
4518                 goto fini;
4519         }
4520
4521         /* Close specified zone */
4522         z_id = get_unaligned_be64(cmd + 2);
4523         if (z_id >= sdebug_capacity) {
4524                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4525                 res = check_condition_result;
4526                 goto fini;
4527         }
4528
4529         zsp = zbc_zone(devip, z_id);
4530         if (z_id != zsp->z_start) {
4531                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4532                 res = check_condition_result;
4533                 goto fini;
4534         }
4535         if (zbc_zone_is_conv(zsp)) {
4536                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4537                 res = check_condition_result;
4538                 goto fini;
4539         }
4540
4541         zbc_close_zone(devip, zsp);
4542 fini:
4543         write_unlock(macc_lckp);
4544         return res;
4545 }
4546
/*
 * Transition @zsp to the FULL condition, keeping the open/closed zone
 * accounting consistent.  When @empty is true, a zone in the EMPTY
 * condition is also transitioned to FULL; when false, EMPTY zones are
 * left untouched.
 */
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		/* an open zone must be closed first to fix the open count */
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		/* re-read z_cond: zbc_close_zone() may have changed it */
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		/* a full zone has its write pointer at the end of the zone */
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}
4562
4563 static void zbc_finish_all(struct sdebug_dev_info *devip)
4564 {
4565         unsigned int i;
4566
4567         for (i = 0; i < devip->nr_zones; i++)
4568                 zbc_finish_zone(devip, &devip->zstate[i], false);
4569 }
4570
/* Finish the zone specified by the FINISH ZONE command, or all zones */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;	/* ALL bit */
	struct sdeb_store_info *sip = devip2sip(devip);
	/* use the fake-store lock when no backing store is attached */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		/* zone commands are invalid opcodes on non-ZBC devices */
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones have no write pointer state to finish */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* empty == true: an EMPTY zone is also transitioned to FULL */
	zbc_finish_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
4619
/*
 * Reset the write pointer of @zsp: the zone returns to the EMPTY
 * condition.  Conventional zones have no write pointer and are ignored.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	/* an open zone must be closed first to fix the open-zone count */
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	/* re-read z_cond: zbc_close_zone() may have set it to CLOSED */
	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
4639
4640 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4641 {
4642         unsigned int i;
4643
4644         for (i = 0; i < devip->nr_zones; i++)
4645                 zbc_rwp_zone(devip, &devip->zstate[i]);
4646 }
4647
4648 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4649 {
4650         struct sdeb_zone_state *zsp;
4651         int res = 0;
4652         u64 z_id;
4653         u8 *cmd = scp->cmnd;
4654         bool all = cmd[14] & 0x01;
4655         struct sdeb_store_info *sip = devip2sip(devip);
4656         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4657
4658         if (!sdebug_dev_is_zoned(devip)) {
4659                 mk_sense_invalid_opcode(scp);
4660                 return check_condition_result;
4661         }
4662
4663         write_lock(macc_lckp);
4664
4665         if (all) {
4666                 zbc_rwp_all(devip);
4667                 goto fini;
4668         }
4669
4670         z_id = get_unaligned_be64(cmd + 2);
4671         if (z_id >= sdebug_capacity) {
4672                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4673                 res = check_condition_result;
4674                 goto fini;
4675         }
4676
4677         zsp = zbc_zone(devip, z_id);
4678         if (z_id != zsp->z_start) {
4679                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4680                 res = check_condition_result;
4681                 goto fini;
4682         }
4683         if (zbc_zone_is_conv(zsp)) {
4684                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4685                 res = check_condition_result;
4686                 goto fini;
4687         }
4688
4689         zbc_rwp_zone(devip, zsp);
4690 fini:
4691         write_unlock(macc_lckp);
4692         return res;
4693 }
4694
4695 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4696 {
4697         u32 tag = blk_mq_unique_tag(cmnd->request);
4698         u16 hwq = blk_mq_unique_tag_to_hwq(tag);
4699
4700         pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4701         if (WARN_ON_ONCE(hwq >= submit_queues))
4702                 hwq = 0;
4703         return sdebug_q_arr + hwq;
4704 }
4705
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	/* mark the deferred work as no longer pending */
	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions running on a different CPU than issue */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* the command was already completed or aborted elsewhere */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* retired_max_queue > 0 means the user recently shrank max_queue */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* lower retired_max_queue to the highest in-use slot + 1,
		 * or clear it once no slot above the new limit is in use */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
4779
4780 /* When high resolution timer goes off this function is called. */
4781 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4782 {
4783         struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4784                                                   hrt);
4785         sdebug_q_cmd_complete(sd_dp);
4786         return HRTIMER_NORESTART;
4787 }
4788
4789 /* When work queue schedules work, it calls this function. */
4790 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4791 {
4792         struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4793                                                   ew.work);
4794         sdebug_q_cmd_complete(sd_dp);
4795 }
4796
4797 static bool got_shared_uuid;
4798 static uuid_t shared_uuid;
4799
/*
 * Set up the zone configuration of a ZBC device (zone size, number of
 * zones, conventional zones, open-zone limit) and allocate and initialize
 * the per-zone state array.  Returns 0 on success or a negative errno.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		/* halve the zone size until at least 4 zones fit */
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	/* round up: the last zone may be smaller than the others */
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;

	/* only host-managed devices report an open-zone limit */
	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	/* Initialize each zone: conventional zones first, then sequential */
	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZONE_TYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;	/* no write pointer */
		} else {
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZONE_TYPE_SWR;
			else
				zsp->z_type = ZBC_ZONE_TYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
		}

		/* clamp the last zone to the device capacity */
		if (zsp->z_start + devip->zsize < capacity)
			zsp->z_size = devip->zsize;
		else
			zsp->z_size = capacity - zsp->z_start;

		zstart += zsp->z_size;
	}

	return 0;
}
4885
4886 static struct sdebug_dev_info *sdebug_device_create(
4887                         struct sdebug_host_info *sdbg_host, gfp_t flags)
4888 {
4889         struct sdebug_dev_info *devip;
4890
4891         devip = kzalloc(sizeof(*devip), flags);
4892         if (devip) {
4893                 if (sdebug_uuid_ctl == 1)
4894                         uuid_gen(&devip->lu_name);
4895                 else if (sdebug_uuid_ctl == 2) {
4896                         if (got_shared_uuid)
4897                                 devip->lu_name = shared_uuid;
4898                         else {
4899                                 uuid_gen(&shared_uuid);
4900                                 got_shared_uuid = true;
4901                                 devip->lu_name = shared_uuid;
4902                         }
4903                 }
4904                 devip->sdbg_host = sdbg_host;
4905                 if (sdeb_zbc_in_use) {
4906                         devip->zmodel = sdeb_zbc_model;
4907                         if (sdebug_device_create_zones(devip)) {
4908                                 kfree(devip);
4909                                 return NULL;
4910                         }
4911                 } else {
4912                         devip->zmodel = BLK_ZONED_NONE;
4913                 }
4914                 devip->sdbg_host = sdbg_host;
4915                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4916         }
4917         return devip;
4918 }
4919
4920 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4921 {
4922         struct sdebug_host_info *sdbg_host;
4923         struct sdebug_dev_info *open_devip = NULL;
4924         struct sdebug_dev_info *devip;
4925
4926         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4927         if (!sdbg_host) {
4928                 pr_err("Host info NULL\n");
4929                 return NULL;
4930         }
4931         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4932                 if ((devip->used) && (devip->channel == sdev->channel) &&
4933                     (devip->target == sdev->id) &&
4934                     (devip->lun == sdev->lun))
4935                         return devip;
4936                 else {
4937                         if ((!devip->used) && (!open_devip))
4938                                 open_devip = devip;
4939                 }
4940         }
4941         if (!open_devip) { /* try and make a new one */
4942                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4943                 if (!open_devip) {
4944                         pr_err("out of memory at line %d\n", __LINE__);
4945                         return NULL;
4946                 }
4947         }
4948
4949         open_devip->channel = sdev->channel;
4950         open_devip->target = sdev->id;
4951         open_devip->lun = sdev->lun;
4952         open_devip->sdbg_host = sdbg_host;
4953         atomic_set(&open_devip->num_in_q, 0);
4954         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4955         open_devip->used = true;
4956         return open_devip;
4957 }
4958
4959 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4960 {
4961         if (sdebug_verbose)
4962                 pr_info("slave_alloc <%u %u %u %llu>\n",
4963                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4964         return 0;
4965 }
4966
4967 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4968 {
4969         struct sdebug_dev_info *devip =
4970                         (struct sdebug_dev_info *)sdp->hostdata;
4971
4972         if (sdebug_verbose)
4973                 pr_info("slave_configure <%u %u %u %llu>\n",
4974                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4975         if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4976                 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4977         if (devip == NULL) {
4978                 devip = find_build_dev_info(sdp);
4979                 if (devip == NULL)
4980                         return 1;  /* no resources, will be marked offline */
4981         }
4982         sdp->hostdata = devip;
4983         if (sdebug_no_uld)
4984                 sdp->no_uld_attach = 1;
4985         config_cdb_len(sdp);
4986         return 0;
4987 }
4988
4989 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
4990 {
4991         struct sdebug_dev_info *devip =
4992                 (struct sdebug_dev_info *)sdp->hostdata;
4993
4994         if (sdebug_verbose)
4995                 pr_info("slave_destroy <%u %u %u %llu>\n",
4996                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4997         if (devip) {
4998                 /* make this slot available for re-use */
4999                 devip->used = false;
5000                 sdp->hostdata = NULL;
5001         }
5002 }
5003
5004 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5005                            enum sdeb_defer_type defer_t)
5006 {
5007         if (!sd_dp)
5008                 return;
5009         if (defer_t == SDEB_DEFER_HRT)
5010                 hrtimer_cancel(&sd_dp->hrt);
5011         else if (defer_t == SDEB_DEFER_WQ)
5012                 cancel_work_sync(&sd_dp->ew.work);
5013 }
5014
/*
 * If @cmnd is found on a submission queue, cancel its deferred timer or
 * work item and release its queue slot.  Returns true if found and
 * stopped; else returns false.
 */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* also scan retired slots above max_queue, if any remain */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot the defer type, then neutralize
				 * it so the completion path becomes a no-op */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock before cancelling: the
				 * completion path (sdebug_q_cmd_complete())
				 * takes qc_lock itself */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
5060
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot the defer type, then neutralize
				 * it so the completion path becomes a no-op */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock while cancelling: the
				 * completion path (sdebug_q_cmd_complete())
				 * takes qc_lock itself */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
5099
5100 /* Free queued command memory on heap */
5101 static void free_all_queued(void)
5102 {
5103         int j, k;
5104         struct sdebug_queue *sqp;
5105         struct sdebug_queued_cmd *sqcp;
5106
5107         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5108                 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5109                         sqcp = &sqp->qc_arr[k];
5110                         kfree(sqcp->sd_dp);
5111                         sqcp->sd_dp = NULL;
5112                 }
5113         }
5114 }
5115
5116 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5117 {
5118         bool ok;
5119
5120         ++num_aborts;
5121         if (SCpnt) {
5122                 ok = stop_queued_cmnd(SCpnt);
5123                 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5124                         sdev_printk(KERN_INFO, SCpnt->device,
5125                                     "%s: command%s found\n", __func__,
5126                                     ok ? "" : " not");
5127         }
5128         return SUCCESS;
5129 }
5130
5131 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5132 {
5133         ++num_dev_resets;
5134         if (SCpnt && SCpnt->device) {
5135                 struct scsi_device *sdp = SCpnt->device;
5136                 struct sdebug_dev_info *devip =
5137                                 (struct sdebug_dev_info *)sdp->hostdata;
5138
5139                 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5140                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5141                 if (devip)
5142                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
5143         }
5144         return SUCCESS;
5145 }
5146
5147 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5148 {
5149         struct sdebug_host_info *sdbg_host;
5150         struct sdebug_dev_info *devip;
5151         struct scsi_device *sdp;
5152         struct Scsi_Host *hp;
5153         int k = 0;
5154
5155         ++num_target_resets;
5156         if (!SCpnt)
5157                 goto lie;
5158         sdp = SCpnt->device;
5159         if (!sdp)
5160                 goto lie;
5161         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5162                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5163         hp = sdp->host;
5164         if (!hp)
5165                 goto lie;
5166         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5167         if (sdbg_host) {
5168                 list_for_each_entry(devip,
5169                                     &sdbg_host->dev_info_list,
5170                                     dev_list)
5171                         if (devip->target == sdp->id) {
5172                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5173                                 ++k;
5174                         }
5175         }
5176         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5177                 sdev_printk(KERN_INFO, sdp,
5178                             "%s: %d device(s) found in target\n", __func__, k);
5179 lie:
5180         return SUCCESS;
5181 }
5182
5183 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5184 {
5185         struct sdebug_host_info *sdbg_host;
5186         struct sdebug_dev_info *devip;
5187         struct scsi_device *sdp;
5188         struct Scsi_Host *hp;
5189         int k = 0;
5190
5191         ++num_bus_resets;
5192         if (!(SCpnt && SCpnt->device))
5193                 goto lie;
5194         sdp = SCpnt->device;
5195         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5196                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5197         hp = sdp->host;
5198         if (hp) {
5199                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5200                 if (sdbg_host) {
5201                         list_for_each_entry(devip,
5202                                             &sdbg_host->dev_info_list,
5203                                             dev_list) {
5204                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5205                                 ++k;
5206                         }
5207                 }
5208         }
5209         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5210                 sdev_printk(KERN_INFO, sdp,
5211                             "%s: %d device(s) found in host\n", __func__, k);
5212 lie:
5213         return SUCCESS;
5214 }
5215
5216 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5217 {
5218         struct sdebug_host_info *sdbg_host;
5219         struct sdebug_dev_info *devip;
5220         int k = 0;
5221
5222         ++num_host_resets;
5223         if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5224                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5225         spin_lock(&sdebug_host_list_lock);
5226         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5227                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5228                                     dev_list) {
5229                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5230                         ++k;
5231                 }
5232         }
5233         spin_unlock(&sdebug_host_list_lock);
5234         stop_all_queued();
5235         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5236                 sdev_printk(KERN_INFO, SCpnt->device,
5237                             "%s: %d device(s) found\n", __func__, k);
5238         return SUCCESS;
5239 }
5240
/* Write a classic MSDOS partition table into the first sector of the
 * ram store, splitting the store into sdebug_num_parts equal partitions.
 * @ramp:       start of the ram store (sector 0 lives here)
 * @store_size: store size in bytes; no-op below 1 MiB
 * Assumes the partition table area is already zeroed.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track (sdebug_sectors_per) is reserved for the table */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* align each partition start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* 0 terminates the loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* CHS of first sector (sector numbers are 1-based) */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* CHS of last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		/* LBA start and length, little-endian on disk */
		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5289
5290 static void block_unblock_all_queues(bool block)
5291 {
5292         int j;
5293         struct sdebug_queue *sqp;
5294
5295         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5296                 atomic_set(&sqp->blocked, (int)block);
5297 }
5298
5299 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5300  * commands will be processed normally before triggers occur.
5301  */
5302 static void tweak_cmnd_count(void)
5303 {
5304         int count, modulo;
5305
5306         modulo = abs(sdebug_every_nth);
5307         if (modulo < 2)
5308                 return;
5309         block_unblock_all_queues(true);
5310         count = atomic_read(&sdebug_cmnd_count);
5311         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5312         block_unblock_all_queues(false);
5313 }
5314
5315 static void clear_queue_stats(void)
5316 {
5317         atomic_set(&sdebug_cmnd_count, 0);
5318         atomic_set(&sdebug_completions, 0);
5319         atomic_set(&sdebug_miss_cpus, 0);
5320         atomic_set(&sdebug_a_tsf, 0);
5321 }
5322
5323 static void setup_inject(struct sdebug_queue *sqp,
5324                          struct sdebug_queued_cmd *sqcp)
5325 {
5326         if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
5327                 if (sdebug_every_nth > 0)
5328                         sqcp->inj_recovered = sqcp->inj_transport
5329                                 = sqcp->inj_dif
5330                                 = sqcp->inj_dix = sqcp->inj_short
5331                                 = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
5332                 return;
5333         }
5334         sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
5335         sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
5336         sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
5337         sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
5338         sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
5339         sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
5340         sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
5341 }
5342
5343 #define INCLUSIVE_TIMING_MAX_NS 1000000         /* 1 millisecond */
5344
5345 /* Complete the processing of the thread that queued a SCSI command to this
5346  * driver. It either completes the command by calling cmnd_done() or
5347  * schedules a hr timer or work queue then returns 0. Returns
5348  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5349  */
5350 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5351                          int scsi_result,
5352                          int (*pfp)(struct scsi_cmnd *,
5353                                     struct sdebug_dev_info *),
5354                          int delta_jiff, int ndelay)
5355 {
5356         bool new_sd_dp;
5357         int k, num_in_q, qdepth, inject;
5358         unsigned long iflags;
5359         u64 ns_from_boot = 0;
5360         struct sdebug_queue *sqp;
5361         struct sdebug_queued_cmd *sqcp;
5362         struct scsi_device *sdp;
5363         struct sdebug_defer *sd_dp;
5364
5365         if (unlikely(devip == NULL)) {
5366                 if (scsi_result == 0)
5367                         scsi_result = DID_NO_CONNECT << 16;
5368                 goto respond_in_thread;
5369         }
5370         sdp = cmnd->device;
5371
5372         if (delta_jiff == 0)
5373                 goto respond_in_thread;
5374
5375         sqp = get_queue(cmnd);
5376         spin_lock_irqsave(&sqp->qc_lock, iflags);
5377         if (unlikely(atomic_read(&sqp->blocked))) {
5378                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5379                 return SCSI_MLQUEUE_HOST_BUSY;
5380         }
5381         num_in_q = atomic_read(&devip->num_in_q);
5382         qdepth = cmnd->device->queue_depth;
5383         inject = 0;
5384         if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5385                 if (scsi_result) {
5386                         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5387                         goto respond_in_thread;
5388                 } else
5389                         scsi_result = device_qfull_result;
5390         } else if (unlikely(sdebug_every_nth &&
5391                             (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5392                             (scsi_result == 0))) {
5393                 if ((num_in_q == (qdepth - 1)) &&
5394                     (atomic_inc_return(&sdebug_a_tsf) >=
5395                      abs(sdebug_every_nth))) {
5396                         atomic_set(&sdebug_a_tsf, 0);
5397                         inject = 1;
5398                         scsi_result = device_qfull_result;
5399                 }
5400         }
5401
5402         k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5403         if (unlikely(k >= sdebug_max_queue)) {
5404                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5405                 if (scsi_result)
5406                         goto respond_in_thread;
5407                 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5408                         scsi_result = device_qfull_result;
5409                 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5410                         sdev_printk(KERN_INFO, sdp,
5411                                     "%s: max_queue=%d exceeded, %s\n",
5412                                     __func__, sdebug_max_queue,
5413                                     (scsi_result ?  "status: TASK SET FULL" :
5414                                                     "report: host busy"));
5415                 if (scsi_result)
5416                         goto respond_in_thread;
5417                 else
5418                         return SCSI_MLQUEUE_HOST_BUSY;
5419         }
5420         __set_bit(k, sqp->in_use_bm);
5421         atomic_inc(&devip->num_in_q);
5422         sqcp = &sqp->qc_arr[k];
5423         sqcp->a_cmnd = cmnd;
5424         cmnd->host_scribble = (unsigned char *)sqcp;
5425         sd_dp = sqcp->sd_dp;
5426         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5427         if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
5428                 setup_inject(sqp, sqcp);
5429         if (sd_dp == NULL) {
5430                 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5431                 if (sd_dp == NULL)
5432                         return SCSI_MLQUEUE_HOST_BUSY;
5433                 new_sd_dp = true;
5434         } else {
5435                 new_sd_dp = false;
5436         }
5437
5438         if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5439                 ns_from_boot = ktime_get_boottime_ns();
5440
5441         /* one of the resp_*() response functions is called here */
5442         cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5443         if (cmnd->result & SDEG_RES_IMMED_MASK) {
5444                 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5445                 delta_jiff = ndelay = 0;
5446         }
5447         if (cmnd->result == 0 && scsi_result != 0)
5448                 cmnd->result = scsi_result;
5449
5450         if (unlikely(sdebug_verbose && cmnd->result))
5451                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5452                             __func__, cmnd->result);
5453
5454         if (delta_jiff > 0 || ndelay > 0) {
5455                 ktime_t kt;
5456
5457                 if (delta_jiff > 0) {
5458                         u64 ns = jiffies_to_nsecs(delta_jiff);
5459
5460                         if (sdebug_random && ns < U32_MAX) {
5461                                 ns = prandom_u32_max((u32)ns);
5462                         } else if (sdebug_random) {
5463                                 ns >>= 12;      /* scale to 4 usec precision */
5464                                 if (ns < U32_MAX)       /* over 4 hours max */
5465                                         ns = prandom_u32_max((u32)ns);
5466                                 ns <<= 12;
5467                         }
5468                         kt = ns_to_ktime(ns);
5469                 } else {        /* ndelay has a 4.2 second max */
5470                         kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5471                                              (u32)ndelay;
5472                         if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5473                                 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5474
5475                                 if (kt <= d) {  /* elapsed duration >= kt */
5476                                         sqcp->a_cmnd = NULL;
5477                                         atomic_dec(&devip->num_in_q);
5478                                         clear_bit(k, sqp->in_use_bm);
5479                                         if (new_sd_dp)
5480                                                 kfree(sd_dp);
5481                                         /* call scsi_done() from this thread */
5482                                         cmnd->scsi_done(cmnd);
5483                                         return 0;
5484                                 }
5485                                 /* otherwise reduce kt by elapsed time */
5486                                 kt -= d;
5487                         }
5488                 }
5489                 if (!sd_dp->init_hrt) {
5490                         sd_dp->init_hrt = true;
5491                         sqcp->sd_dp = sd_dp;
5492                         hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5493                                      HRTIMER_MODE_REL_PINNED);
5494                         sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5495                         sd_dp->sqa_idx = sqp - sdebug_q_arr;
5496                         sd_dp->qc_idx = k;
5497                 }
5498                 if (sdebug_statistics)
5499                         sd_dp->issuing_cpu = raw_smp_processor_id();
5500                 sd_dp->defer_t = SDEB_DEFER_HRT;
5501                 /* schedule the invocation of scsi_done() for a later time */
5502                 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5503         } else {        /* jdelay < 0, use work queue */
5504                 if (!sd_dp->init_wq) {
5505                         sd_dp->init_wq = true;
5506                         sqcp->sd_dp = sd_dp;
5507                         sd_dp->sqa_idx = sqp - sdebug_q_arr;
5508                         sd_dp->qc_idx = k;
5509                         INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5510                 }
5511                 if (sdebug_statistics)
5512                         sd_dp->issuing_cpu = raw_smp_processor_id();
5513                 sd_dp->defer_t = SDEB_DEFER_WQ;
5514                 if (unlikely(sqcp->inj_cmd_abort))
5515                         sd_dp->aborted = true;
5516                 schedule_work(&sd_dp->ew.work);
5517                 if (unlikely(sqcp->inj_cmd_abort)) {
5518                         sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5519                                     cmnd->request->tag);
5520                         blk_abort_request(cmnd->request);
5521                 }
5522         }
5523         if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
5524                      (scsi_result == device_qfull_result)))
5525                 sdev_printk(KERN_INFO, sdp,
5526                             "%s: num_in_q=%d +1, %s%s\n", __func__,
5527                             num_in_q, (inject ? "<inject> " : ""),
5528                             "status: TASK SET FULL");
5529         return 0;
5530
5531 respond_in_thread:      /* call back to mid-layer using invocation thread */
5532         cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5533         cmnd->result &= ~SDEG_RES_IMMED_MASK;
5534         if (cmnd->result == 0 && scsi_result != 0)
5535                 cmnd->result = scsi_result;
5536         cmnd->scsi_done(cmnd);
5537         return 0;
5538 }
5539
5540 /* Note: The following macros create attribute files in the
5541    /sys/module/scsi_debug/parameters directory. Unfortunately this
5542    driver is unaware of a change and cannot trigger auxiliary actions
5543    as it can when the corresponding attribute in the
5544    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5545  */
/* Module parameters, alphabetical. Writable (S_IWUSR) parameters take
 * effect via sysfs without driver awareness (see comment block above).
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5610
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One description per parameter above, same alphabetical order */
MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5675
5676 #define SDEBUG_INFO_LEN 256
5677 static char sdebug_info[SDEBUG_INFO_LEN];
5678
5679 static const char *scsi_debug_info(struct Scsi_Host *shp)
5680 {
5681         int k;
5682
5683         k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5684                       my_name, SDEBUG_VERSION, sdebug_version_date);
5685         if (k >= (SDEBUG_INFO_LEN - 1))
5686                 return sdebug_info;
5687         scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5688                   "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5689                   sdebug_dev_size_mb, sdebug_opts, submit_queues,
5690                   "statistics", (int)sdebug_statistics);
5691         return sdebug_info;
5692 }
5693
5694 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5695 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5696                                  int length)
5697 {
5698         char arr[16];
5699         int opts;
5700         int minLen = length > 15 ? 15 : length;
5701
5702         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5703                 return -EACCES;
5704         memcpy(arr, buffer, minLen);
5705         arr[minLen] = '\0';
5706         if (1 != sscanf(arr, "%d", &opts))
5707                 return -EINVAL;
5708         sdebug_opts = opts;
5709         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5710         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5711         if (sdebug_every_nth != 0)
5712                 tweak_cmnd_count();
5713         return length;
5714 }
5715
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	/* Driver-wide identification and current tunables */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* For each submit queue, report the span of busy (in-flight) slots */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;	/* store is marked "not in use" */
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		/* First map each host to its backing-store index ... */
		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		/* ... then enumerate all stores, flagging unused ones */
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
5789
/* Show the command response delay in jiffies. */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* Freeze queue processing while scanning; refuse the
			 * change if any submit queue has in-flight commands. */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				/* a jiffies delay overrides any ndelay */
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
5829
/* Show the command response delay in nanoseconds. */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* ndelay is capped below one second (10^9 ns) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* Freeze queue processing while scanning; refuse the
			 * change if any submit queue has in-flight commands. */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
5870
5871 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5872 {
5873         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5874 }
5875
5876 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5877                           size_t count)
5878 {
5879         int opts;
5880         char work[20];
5881
5882         if (sscanf(buf, "%10s", work) == 1) {
5883                 if (strncasecmp(work, "0x", 2) == 0) {
5884                         if (kstrtoint(work + 2, 16, &opts) == 0)
5885                                 goto opts_done;
5886                 } else {
5887                         if (kstrtoint(work, 10, &opts) == 0)
5888                                 goto opts_done;
5889                 }
5890         }
5891         return -EINVAL;
5892 opts_done:
5893         sdebug_opts = opts;
5894         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5895         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5896         tweak_cmnd_count();
5897         return count;
5898 }
5899 static DRIVER_ATTR_RW(opts);
5900
5901 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5902 {
5903         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5904 }
5905 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5906                            size_t count)
5907 {
5908         int n;
5909
5910         /* Cannot change from or to TYPE_ZBC with sysfs */
5911         if (sdebug_ptype == TYPE_ZBC)
5912                 return -EINVAL;
5913
5914         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5915                 if (n == TYPE_ZBC)
5916                         return -EINVAL;
5917                 sdebug_ptype = n;
5918                 return count;
5919         }
5920         return -EINVAL;
5921 }
5922 static DRIVER_ATTR_RW(ptype);
5923
5924 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5925 {
5926         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5927 }
5928 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5929                             size_t count)
5930 {
5931         int n;
5932
5933         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5934                 sdebug_dsense = n;
5935                 return count;
5936         }
5937         return -EINVAL;
5938 }
5939 static DRIVER_ATTR_RW(dsense);
5940
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/* Toggling fake_rw creates (1 --> 0) or shrinks (0 --> 1) the shared ram
 * store(s) behind the hosts, so transitions do real work here. */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both sides to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				/* no store has ever existed; create one */
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* revive the first store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
5988
5989 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
5990 {
5991         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
5992 }
5993 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
5994                               size_t count)
5995 {
5996         int n;
5997
5998         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5999                 sdebug_no_lun_0 = n;
6000                 return count;
6001         }
6002         return -EINVAL;
6003 }
6004 static DRIVER_ATTR_RW(no_lun_0);
6005
6006 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6007 {
6008         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6009 }
6010 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6011                               size_t count)
6012 {
6013         int n;
6014
6015         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6016                 sdebug_num_tgts = n;
6017                 sdebug_max_tgts_luns();
6018                 return count;
6019         }
6020         return -EINVAL;
6021 }
6022 static DRIVER_ATTR_RW(num_tgts);
6023
6024 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6025 {
6026         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6027 }
6028 static DRIVER_ATTR_RO(dev_size_mb);
6029
6030 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6031 {
6032         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6033 }
6034
6035 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6036                                     size_t count)
6037 {
6038         bool v;
6039
6040         if (kstrtobool(buf, &v))
6041                 return -EINVAL;
6042
6043         sdebug_per_host_store = v;
6044         return count;
6045 }
6046 static DRIVER_ATTR_RW(per_host_store);
6047
6048 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6049 {
6050         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6051 }
6052 static DRIVER_ATTR_RO(num_parts);
6053
6054 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6055 {
6056         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6057 }
6058 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6059                                size_t count)
6060 {
6061         int nth;
6062
6063         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
6064                 sdebug_every_nth = nth;
6065                 if (nth && !sdebug_statistics) {
6066                         pr_info("every_nth needs statistics=1, set it\n");
6067                         sdebug_statistics = true;
6068                 }
6069                 tweak_cmnd_count();
6070                 return count;
6071         }
6072         return -EINVAL;
6073 }
6074 static DRIVER_ATTR_RW(every_nth);
6075
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/* Change the LUN count per target (capped at 256). On a real change with a
 * sufficiently modern scsi_level, post a REPORTED LUNS DATA HAS CHANGED unit
 * attention on every device so initiators re-scan. */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk every device on every host under the lock */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
6114
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		/* k = highest in-use slot index across all submit queues */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			/* no bit set anywhere: nothing in flight to retire */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* commands above the new limit must drain first */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
6150
6151 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6152 {
6153         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6154 }
6155 static DRIVER_ATTR_RO(no_uld);
6156
6157 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6158 {
6159         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6160 }
6161 static DRIVER_ATTR_RO(scsi_level);
6162
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/* Change the advertised capacity. On a real change, post a CAPACITY DATA HAS
 * CHANGED unit attention on every device so initiators re-read capacity. */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk every device on every host under the lock */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
6201
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/* Add (positive value) or remove (negative value) that many pseudo hosts.
 * When per-host stores are in effect, adding a host first tries to re-use a
 * store previously marked not-in-use before allocating a new one.
 * NOTE(review): failures from sdebug_do_add_host()/sdebug_add_host_helper()
 * are not propagated to the writer; 'count' is returned regardless. */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* look for a retired store to revive */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6245
6246 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6247 {
6248         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6249 }
6250 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6251                                     size_t count)
6252 {
6253         int n;
6254
6255         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6256                 sdebug_vpd_use_hostno = n;
6257                 return count;
6258         }
6259         return -EINVAL;
6260 }
6261 static DRIVER_ATTR_RW(vpd_use_hostno);
6262
6263 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6264 {
6265         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6266 }
6267 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6268                                 size_t count)
6269 {
6270         int n;
6271
6272         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6273                 if (n > 0)
6274                         sdebug_statistics = true;
6275                 else {
6276                         clear_queue_stats();
6277                         sdebug_statistics = false;
6278                 }
6279                 return count;
6280         }
6281         return -EINVAL;
6282 }
6283 static DRIVER_ATTR_RW(statistics);
6284
6285 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6286 {
6287         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6288 }
6289 static DRIVER_ATTR_RO(sector_size);
6290
6291 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6292 {
6293         return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6294 }
6295 static DRIVER_ATTR_RO(submit_queues);
6296
6297 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6298 {
6299         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6300 }
6301 static DRIVER_ATTR_RO(dix);
6302
6303 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6304 {
6305         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6306 }
6307 static DRIVER_ATTR_RO(dif);
6308
6309 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6310 {
6311         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6312 }
6313 static DRIVER_ATTR_RO(guard);
6314
6315 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6316 {
6317         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6318 }
6319 static DRIVER_ATTR_RO(ato);
6320
/* Show the provisioning map of store 0 as a bit-list, or the full LBA range
 * when logical block provisioning is off. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* only store 0 is reported, and only when a ram store exists */
	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	/* PAGE_SIZE - 1 above leaves room for this newline + NUL */
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
6342
6343 static ssize_t random_show(struct device_driver *ddp, char *buf)
6344 {
6345         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6346 }
6347
6348 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6349                             size_t count)
6350 {
6351         bool v;
6352
6353         if (kstrtobool(buf, &v))
6354                 return -EINVAL;
6355
6356         sdebug_random = v;
6357         return count;
6358 }
6359 static DRIVER_ATTR_RW(random);
6360
6361 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6362 {
6363         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6364 }
6365 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6366                                size_t count)
6367 {
6368         int n;
6369
6370         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6371                 sdebug_removable = (n > 0);
6372                 return count;
6373         }
6374         return -EINVAL;
6375 }
6376 static DRIVER_ATTR_RW(removable);
6377
6378 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6379 {
6380         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6381 }
6382 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6383 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6384                                size_t count)
6385 {
6386         int n;
6387
6388         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6389                 sdebug_host_lock = (n > 0);
6390                 return count;
6391         }
6392         return -EINVAL;
6393 }
6394 static DRIVER_ATTR_RW(host_lock);
6395
6396 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6397 {
6398         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6399 }
6400 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6401                             size_t count)
6402 {
6403         int n;
6404
6405         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6406                 sdebug_strict = (n > 0);
6407                 return count;
6408         }
6409         return -EINVAL;
6410 }
6411 static DRIVER_ATTR_RW(strict);
6412
6413 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6414 {
6415         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6416 }
6417 static DRIVER_ATTR_RO(uuid_ctl);
6418
6419 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6420 {
6421         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6422 }
6423 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6424                              size_t count)
6425 {
6426         int ret, n;
6427
6428         ret = kstrtoint(buf, 0, &n);
6429         if (ret)
6430                 return ret;
6431         sdebug_cdb_len = n;
6432         all_config_cdb_len();
6433         return count;
6434 }
6435 static DRIVER_ATTR_RW(cdb_len);
6436
/* Three accepted spellings for each zoned model; sdeb_zbc_model_str() tries
 * them in a, b, c order and all map to the same BLK_ZONED_* index. */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

/* short spellings */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

/* numeric spellings */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6454
6455 static int sdeb_zbc_model_str(const char *cp)
6456 {
6457         int res = sysfs_match_string(zbc_model_strs_a, cp);
6458
6459         if (res < 0) {
6460                 res = sysfs_match_string(zbc_model_strs_b, cp);
6461                 if (res < 0) {
6462                         res = sysfs_match_string(zbc_model_strs_c, cp);
6463                         if (sdeb_zbc_model < 0)
6464                                 return -EINVAL;
6465                 }
6466         }
6467         return res;
6468 }
6469
6470 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6471 {
6472         return scnprintf(buf, PAGE_SIZE, "%s\n",
6473                          zbc_model_strs_a[sdeb_zbc_model]);
6474 }
6475 static DRIVER_ATTR_RO(zbc);
6476
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

/* NULL-terminated list consumed via ATTRIBUTE_GROUPS() below. */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
6522
/* Root device ("pseudo_0", registered in scsi_debug_init()) that acts as
 * the parent of every pseudo adapter device. */
static struct device *pseudo_primary;
6524
6525 static int __init scsi_debug_init(void)
6526 {
6527         bool want_store = (sdebug_fake_rw == 0);
6528         unsigned long sz;
6529         int k, ret, hosts_to_add;
6530         int idx = -1;
6531
6532         ramdisk_lck_a[0] = &atomic_rw;
6533         ramdisk_lck_a[1] = &atomic_rw2;
6534         atomic_set(&retired_max_queue, 0);
6535
6536         if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6537                 pr_warn("ndelay must be less than 1 second, ignored\n");
6538                 sdebug_ndelay = 0;
6539         } else if (sdebug_ndelay > 0)
6540                 sdebug_jdelay = JDELAY_OVERRIDDEN;
6541
6542         switch (sdebug_sector_size) {
6543         case  512:
6544         case 1024:
6545         case 2048:
6546         case 4096:
6547                 break;
6548         default:
6549                 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6550                 return -EINVAL;
6551         }
6552
6553         switch (sdebug_dif) {
6554         case T10_PI_TYPE0_PROTECTION:
6555                 break;
6556         case T10_PI_TYPE1_PROTECTION:
6557         case T10_PI_TYPE2_PROTECTION:
6558         case T10_PI_TYPE3_PROTECTION:
6559                 have_dif_prot = true;
6560                 break;
6561
6562         default:
6563                 pr_err("dif must be 0, 1, 2 or 3\n");
6564                 return -EINVAL;
6565         }
6566
6567         if (sdebug_num_tgts < 0) {
6568                 pr_err("num_tgts must be >= 0\n");
6569                 return -EINVAL;
6570         }
6571
6572         if (sdebug_guard > 1) {
6573                 pr_err("guard must be 0 or 1\n");
6574                 return -EINVAL;
6575         }
6576
6577         if (sdebug_ato > 1) {
6578                 pr_err("ato must be 0 or 1\n");
6579                 return -EINVAL;
6580         }
6581
6582         if (sdebug_physblk_exp > 15) {
6583                 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6584                 return -EINVAL;
6585         }
6586         if (sdebug_max_luns > 256) {
6587                 pr_warn("max_luns can be no more than 256, use default\n");
6588                 sdebug_max_luns = DEF_MAX_LUNS;
6589         }
6590
6591         if (sdebug_lowest_aligned > 0x3fff) {
6592                 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6593                 return -EINVAL;
6594         }
6595
6596         if (submit_queues < 1) {
6597                 pr_err("submit_queues must be 1 or more\n");
6598                 return -EINVAL;
6599         }
6600         sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6601                                GFP_KERNEL);
6602         if (sdebug_q_arr == NULL)
6603                 return -ENOMEM;
6604         for (k = 0; k < submit_queues; ++k)
6605                 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6606
6607         /*
6608          * check for host managed zoned block device specified with
6609          * ptype=0x14 or zbc=XXX.
6610          */
6611         if (sdebug_ptype == TYPE_ZBC) {
6612                 sdeb_zbc_model = BLK_ZONED_HM;
6613         } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6614                 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6615                 if (k < 0) {
6616                         ret = k;
6617                         goto free_vm;
6618                 }
6619                 sdeb_zbc_model = k;
6620                 switch (sdeb_zbc_model) {
6621                 case BLK_ZONED_NONE:
6622                 case BLK_ZONED_HA:
6623                         sdebug_ptype = TYPE_DISK;
6624                         break;
6625                 case BLK_ZONED_HM:
6626                         sdebug_ptype = TYPE_ZBC;
6627                         break;
6628                 default:
6629                         pr_err("Invalid ZBC model\n");
6630                         return -EINVAL;
6631                 }
6632         }
6633         if (sdeb_zbc_model != BLK_ZONED_NONE) {
6634                 sdeb_zbc_in_use = true;
6635                 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6636                         sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6637         }
6638
6639         if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6640                 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6641         if (sdebug_dev_size_mb < 1)
6642                 sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6643         sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6644         sdebug_store_sectors = sz / sdebug_sector_size;
6645         sdebug_capacity = get_sdebug_capacity();
6646
6647         /* play around with geometry, don't waste too much on track 0 */
6648         sdebug_heads = 8;
6649         sdebug_sectors_per = 32;
6650         if (sdebug_dev_size_mb >= 256)
6651                 sdebug_heads = 64;
6652         else if (sdebug_dev_size_mb >= 16)
6653                 sdebug_heads = 32;
6654         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6655                                (sdebug_sectors_per * sdebug_heads);
6656         if (sdebug_cylinders_per >= 1024) {
6657                 /* other LLDs do this; implies >= 1GB ram disk ... */
6658                 sdebug_heads = 255;
6659                 sdebug_sectors_per = 63;
6660                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6661                                (sdebug_sectors_per * sdebug_heads);
6662         }
6663         if (scsi_debug_lbp()) {
6664                 sdebug_unmap_max_blocks =
6665                         clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6666
6667                 sdebug_unmap_max_desc =
6668                         clamp(sdebug_unmap_max_desc, 0U, 256U);
6669
6670                 sdebug_unmap_granularity =
6671                         clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6672
6673                 if (sdebug_unmap_alignment &&
6674                     sdebug_unmap_granularity <=
6675                     sdebug_unmap_alignment) {
6676                         pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6677                         ret = -EINVAL;
6678                         goto free_q_arr;
6679                 }
6680         }
6681         xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6682         if (want_store) {
6683                 idx = sdebug_add_store();
6684                 if (idx < 0) {
6685                         ret = idx;
6686                         goto free_q_arr;
6687                 }
6688         }
6689
6690         pseudo_primary = root_device_register("pseudo_0");
6691         if (IS_ERR(pseudo_primary)) {
6692                 pr_warn("root_device_register() error\n");
6693                 ret = PTR_ERR(pseudo_primary);
6694                 goto free_vm;
6695         }
6696         ret = bus_register(&pseudo_lld_bus);
6697         if (ret < 0) {
6698                 pr_warn("bus_register error: %d\n", ret);
6699                 goto dev_unreg;
6700         }
6701         ret = driver_register(&sdebug_driverfs_driver);
6702         if (ret < 0) {
6703                 pr_warn("driver_register error: %d\n", ret);
6704                 goto bus_unreg;
6705         }
6706
6707         hosts_to_add = sdebug_add_host;
6708         sdebug_add_host = 0;
6709
6710         for (k = 0; k < hosts_to_add; k++) {
6711                 if (want_store && k == 0) {
6712                         ret = sdebug_add_host_helper(idx);
6713                         if (ret < 0) {
6714                                 pr_err("add_host_helper k=%d, error=%d\n",
6715                                        k, -ret);
6716                                 break;
6717                         }
6718                 } else {
6719                         ret = sdebug_do_add_host(want_store &&
6720                                                  sdebug_per_host_store);
6721                         if (ret < 0) {
6722                                 pr_err("add_host k=%d error=%d\n", k, -ret);
6723                                 break;
6724                         }
6725                 }
6726         }
6727         if (sdebug_verbose)
6728                 pr_info("built %d host(s)\n", sdebug_num_hosts);
6729
6730         return 0;
6731
6732 bus_unreg:
6733         bus_unregister(&pseudo_lld_bus);
6734 dev_unreg:
6735         root_device_unregister(pseudo_primary);
6736 free_vm:
6737         sdebug_erase_store(idx, NULL);
6738 free_q_arr:
6739         kfree(sdebug_q_arr);
6740         return ret;
6741 }
6742
/*
 * Module teardown: stop and free queued commands, remove every pseudo
 * host, unregister driver/bus/root device (reverse of scsi_debug_init()),
 * then release all backing stores.
 */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	/* false: do not keep the first store; erase everything */
	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
}
6758
/* device_initcall() (equivalent to module_init() when built as a module)
 * so that, when built in, init runs at device initcall level. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
6761
/*
 * Release callback for a pseudo adapter's struct device: frees the
 * enclosing sdebug_host_info allocation. Invoked by the driver core when
 * the device's last reference is dropped.
 */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
6769
6770 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6771 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6772 {
6773         if (idx < 0)
6774                 return;
6775         if (!sip) {
6776                 if (xa_empty(per_store_ap))
6777                         return;
6778                 sip = xa_load(per_store_ap, idx);
6779                 if (!sip)
6780                         return;
6781         }
6782         vfree(sip->map_storep);
6783         vfree(sip->dif_storep);
6784         vfree(sip->storep);
6785         xa_erase(per_store_ap, idx);
6786         kfree(sip);
6787 }
6788
6789 /* Assume apart_from_first==false only in shutdown case. */
6790 static void sdebug_erase_all_stores(bool apart_from_first)
6791 {
6792         unsigned long idx;
6793         struct sdeb_store_info *sip = NULL;
6794
6795         xa_for_each(per_store_ap, idx, sip) {
6796                 if (apart_from_first)
6797                         apart_from_first = false;
6798                 else
6799                         sdebug_erase_store(idx, sip);
6800         }
6801         if (apart_from_first)
6802                 sdeb_most_recent_idx = sdeb_first_idx;
6803 }
6804
6805 /*
6806  * Returns store xarray new element index (idx) if >=0 else negated errno.
6807  * Limit the number of stores to 65536.
6808  */
6809 static int sdebug_add_store(void)
6810 {
6811         int res;
6812         u32 n_idx;
6813         unsigned long iflags;
6814         unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6815         struct sdeb_store_info *sip = NULL;
6816         struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6817
6818         sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6819         if (!sip)
6820                 return -ENOMEM;
6821
6822         xa_lock_irqsave(per_store_ap, iflags);
6823         res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6824         if (unlikely(res < 0)) {
6825                 xa_unlock_irqrestore(per_store_ap, iflags);
6826                 kfree(sip);
6827                 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6828                 return res;
6829         }
6830         sdeb_most_recent_idx = n_idx;
6831         if (sdeb_first_idx < 0)
6832                 sdeb_first_idx = n_idx;
6833         xa_unlock_irqrestore(per_store_ap, iflags);
6834
6835         res = -ENOMEM;
6836         sip->storep = vzalloc(sz);
6837         if (!sip->storep) {
6838                 pr_err("user data oom\n");
6839                 goto err;
6840         }
6841         if (sdebug_num_parts > 0)
6842                 sdebug_build_parts(sip->storep, sz);
6843
6844         /* DIF/DIX: what T10 calls Protection Information (PI) */
6845         if (sdebug_dix) {
6846                 int dif_size;
6847
6848                 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
6849                 sip->dif_storep = vmalloc(dif_size);
6850
6851                 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
6852                         sip->dif_storep);
6853
6854                 if (!sip->dif_storep) {
6855                         pr_err("DIX oom\n");
6856                         goto err;
6857                 }
6858                 memset(sip->dif_storep, 0xff, dif_size);
6859         }
6860         /* Logical Block Provisioning */
6861         if (scsi_debug_lbp()) {
6862                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
6863                 sip->map_storep = vmalloc(array_size(sizeof(long),
6864                                                      BITS_TO_LONGS(map_size)));
6865
6866                 pr_info("%lu provisioning blocks\n", map_size);
6867
6868                 if (!sip->map_storep) {
6869                         pr_err("LBP map oom\n");
6870                         goto err;
6871                 }
6872
6873                 bitmap_zero(sip->map_storep, map_size);
6874
6875                 /* Map first 1KB for partition table */
6876                 if (sdebug_num_parts)
6877                         map_region(sip, 0, 2);
6878         }
6879
6880         rwlock_init(&sip->macc_lck);
6881         return (int)n_idx;
6882 err:
6883         sdebug_erase_store((int)n_idx, sip);
6884         pr_warn("%s: failed, errno=%d\n", __func__, -res);
6885         return res;
6886 }
6887
6888 static int sdebug_add_host_helper(int per_host_idx)
6889 {
6890         int k, devs_per_host, idx;
6891         int error = -ENOMEM;
6892         struct sdebug_host_info *sdbg_host;
6893         struct sdebug_dev_info *sdbg_devinfo, *tmp;
6894
6895         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
6896         if (!sdbg_host)
6897                 return -ENOMEM;
6898         idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
6899         if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
6900                 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6901         sdbg_host->si_idx = idx;
6902
6903         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
6904
6905         devs_per_host = sdebug_num_tgts * sdebug_max_luns;
6906         for (k = 0; k < devs_per_host; k++) {
6907                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
6908                 if (!sdbg_devinfo)
6909                         goto clean;
6910         }
6911
6912         spin_lock(&sdebug_host_list_lock);
6913         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
6914         spin_unlock(&sdebug_host_list_lock);
6915
6916         sdbg_host->dev.bus = &pseudo_lld_bus;
6917         sdbg_host->dev.parent = pseudo_primary;
6918         sdbg_host->dev.release = &sdebug_release_adapter;
6919         dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
6920
6921         error = device_register(&sdbg_host->dev);
6922         if (error)
6923                 goto clean;
6924
6925         ++sdebug_num_hosts;
6926         return 0;
6927
6928 clean:
6929         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
6930                                  dev_list) {
6931                 list_del(&sdbg_devinfo->dev_list);
6932                 kfree(sdbg_devinfo->zstate);
6933                 kfree(sdbg_devinfo);
6934         }
6935         kfree(sdbg_host);
6936         pr_warn("%s: failed, errno=%d\n", __func__, -error);
6937         return error;
6938 }
6939
6940 static int sdebug_do_add_host(bool mk_new_store)
6941 {
6942         int ph_idx = sdeb_most_recent_idx;
6943
6944         if (mk_new_store) {
6945                 ph_idx = sdebug_add_store();
6946                 if (ph_idx < 0)
6947                         return ph_idx;
6948         }
6949         return sdebug_add_host_helper(ph_idx);
6950 }
6951
/*
 * Remove the most recently added pseudo host. When this is not the final
 * teardown (the_end==false) and no other host shares its backing store,
 * mark that store SDEB_XA_NOT_IN_USE so it can be recycled later.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* take the tail, i.e. the most recently added host */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* does any other host reference the same store index? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
6991
6992 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
6993 {
6994         int num_in_q = 0;
6995         struct sdebug_dev_info *devip;
6996
6997         block_unblock_all_queues(true);
6998         devip = (struct sdebug_dev_info *)sdev->hostdata;
6999         if (NULL == devip) {
7000                 block_unblock_all_queues(false);
7001                 return  -ENODEV;
7002         }
7003         num_in_q = atomic_read(&devip->num_in_q);
7004
7005         if (qdepth < 1)
7006                 qdepth = 1;
7007         /* allow to exceed max host qc_arr elements for testing */
7008         if (qdepth > SDEBUG_CANQUEUE + 10)
7009                 qdepth = SDEBUG_CANQUEUE + 10;
7010         scsi_change_queue_depth(sdev, qdepth);
7011
7012         if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7013                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7014                             __func__, qdepth, num_in_q);
7015         }
7016         block_unblock_all_queues(false);
7017         return sdev->queue_depth;
7018 }
7019
7020 static bool fake_timeout(struct scsi_cmnd *scp)
7021 {
7022         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7023                 if (sdebug_every_nth < -1)
7024                         sdebug_every_nth = -1;
7025                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7026                         return true; /* ignore command causing timeout */
7027                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7028                          scsi_medium_access_command(scp))
7029                         return true; /* time out reads and writes */
7030         }
7031         return false;
7032 }
7033
7034 static bool fake_host_busy(struct scsi_cmnd *scp)
7035 {
7036         return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
7037                 (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7038 }
7039
/*
 * queuecommand() entry point for the pseudo LLD. Decodes the CDB via
 * opcode_ind_arr/opcode_info_arr (including service-action variants),
 * applies optional strict CDB-mask checking, pending unit attentions and
 * "not ready" state, then hands the chosen resp_* handler plus the
 * configured delay to schedule_resp(). Returns 0 or
 * SCSI_MLQUEUE_HOST_BUSY.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;

	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally log the CDB bytes in hex */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* match on opcode plus service action */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no attached variant matched */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate highest offending bit for sense */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the cmd skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	/* fake_rw: respond without running the resp_* handler */
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
7201
/* Host template for the pseudo LLD; one Scsi_Host per registered adapter. */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,	/* overridden in sdebug_driver_probe() */
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
7228
/*
 * Bus probe: allocate and configure a Scsi_Host for one pseudo adapter,
 * set DIF/DIX protection capabilities from the module parameters, then
 * add and scan the host. Returns 0 on success or -ENODEV.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	/* hostdata holds just a pointer back to sdbg_host (set below) */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	/* +1 presumably leaves room for the initiator's own id — confirm */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
7326
7327 static int sdebug_driver_remove(struct device *dev)
7328 {
7329         struct sdebug_host_info *sdbg_host;
7330         struct sdebug_dev_info *sdbg_devinfo, *tmp;
7331
7332         sdbg_host = to_sdebug_host(dev);
7333
7334         if (!sdbg_host) {
7335                 pr_err("Unable to locate host info\n");
7336                 return -ENODEV;
7337         }
7338
7339         scsi_remove_host(sdbg_host->shost);
7340
7341         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7342                                  dev_list) {
7343                 list_del(&sdbg_devinfo->dev_list);
7344                 kfree(sdbg_devinfo->zstate);
7345                 kfree(sdbg_devinfo);
7346         }
7347
7348         scsi_host_put(sdbg_host->shost);
7349         return 0;
7350 }
7351
/*
 * Bus ->match() callback for the pseudo bus: unconditionally report a
 * match, so every device registered on the bus is bound to every driver
 * (there is only one driver on this bus) and probing always proceeds.
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
7357
/*
 * Fake "pseudo" bus that the emulated scsi_debug hosts hang off.  Since
 * the match callback accepts everything, adding a device to this bus
 * always invokes sdebug_driver_probe(), and removing one always invokes
 * sdebug_driver_remove().
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};