/*
 * Source provenance (gitweb extraction residue, kept as a comment):
 * commit 31e8d0b0274707779fa9fad04da0f1fb797d8816
 * [linux-2.6-microblaze.git] / drivers / scsi / scsi_debug.c
 */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
13  */
14
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18 #include <linux/module.h>
19
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45
46 #include <net/checksum.h>
47
48 #include <asm/unaligned.h>
49
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58
59 #include "sd.h"
60 #include "scsi_logging.h"
61
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0189"   /* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200421";
65
66 #define MY_NAME "scsi_debug"
67
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW     0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_UUID_CTL 0
155 #define JDELAY_OVERRIDDEN -9999
156
157 /* Default parameters for ZBC drives */
158 #define DEF_ZBC_ZONE_SIZE_MB    128
159 #define DEF_ZBC_MAX_OPEN_ZONES  8
160
161 #define SDEBUG_LUN_0_VAL 0
162
163 /* bit mask values for sdebug_opts */
164 #define SDEBUG_OPT_NOISE                1
165 #define SDEBUG_OPT_MEDIUM_ERR           2
166 #define SDEBUG_OPT_TIMEOUT              4
167 #define SDEBUG_OPT_RECOVERED_ERR        8
168 #define SDEBUG_OPT_TRANSPORT_ERR        16
169 #define SDEBUG_OPT_DIF_ERR              32
170 #define SDEBUG_OPT_DIX_ERR              64
171 #define SDEBUG_OPT_MAC_TIMEOUT          128
172 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
173 #define SDEBUG_OPT_Q_NOISE              0x200
174 #define SDEBUG_OPT_ALL_TSF              0x400
175 #define SDEBUG_OPT_RARE_TSF             0x800
176 #define SDEBUG_OPT_N_WCE                0x1000
177 #define SDEBUG_OPT_RESET_NOISE          0x2000
178 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
179 #define SDEBUG_OPT_HOST_BUSY            0x8000
180 #define SDEBUG_OPT_CMD_ABORT            0x10000
181 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
182                               SDEBUG_OPT_RESET_NOISE)
183 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
184                                   SDEBUG_OPT_TRANSPORT_ERR | \
185                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
186                                   SDEBUG_OPT_SHORT_TRANSFER | \
187                                   SDEBUG_OPT_HOST_BUSY | \
188                                   SDEBUG_OPT_CMD_ABORT)
189 /* When "every_nth" > 0 then modulo "every_nth" commands:
190  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
191  *   - a RECOVERED_ERROR is simulated on successful read and write
192  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
193  *   - a TRANSPORT_ERROR is simulated on successful read and write
194  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
195  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
196  *     CMD_ABORT
197  *
198  * When "every_nth" < 0 then after "- every_nth" commands the selected
199  * error will be injected. The error will be injected on every subsequent
200  * command until some other action occurs; for example, the user writing
201  * a new value (other than -1 or 1) to every_nth:
202  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
203  */
204
205 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
206  * priority order. In the subset implemented here lower numbers have higher
207  * priority. The UA numbers should be a sequence starting from 0 with
208  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
209 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
210 #define SDEBUG_UA_BUS_RESET 1
211 #define SDEBUG_UA_MODE_CHANGED 2
212 #define SDEBUG_UA_CAPACITY_CHANGED 3
213 #define SDEBUG_UA_LUNS_CHANGED 4
214 #define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
215 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
216 #define SDEBUG_NUM_UAS 7
217
218 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
219  * sector on read commands: */
220 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
221 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
222
223 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
224  * or "peripheral device" addressing (value 0) */
225 #define SAM2_LUN_ADDRESS_METHOD 0
226
227 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
228  * (for response) per submit queue at one time. Can be reduced by max_queue
229  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
230  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
231  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
232  * but cannot exceed SDEBUG_CANQUEUE .
233  */
234 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
235 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
236 #define DEF_CMD_PER_LUN  255
237
238 #define F_D_IN                  1
239 #define F_D_OUT                 2
240 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
241 #define F_D_UNKN                8
242 #define F_RL_WLUN_OK            0x10
243 #define F_SKIP_UA               0x20
244 #define F_DELAY_OVERR           0x40
245 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
246 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
247 #define F_INV_OP                0x200
248 #define F_FAKE_RW               0x400
249 #define F_M_ACCESS              0x800   /* media access */
250 #define F_SSU_DELAY             0x1000
251 #define F_SYNC_DELAY            0x2000
252
253 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
254 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
255 #define FF_SA (F_SA_HIGH | F_SA_LOW)
256 #define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
257
258 #define SDEBUG_MAX_PARTS 4
259
260 #define SDEBUG_MAX_CMD_LEN 32
261
262 #define SDEB_XA_NOT_IN_USE XA_MARK_1
263
/* ZBC zone condition codes; enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
275
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_cond z_cond;	/* current zone condition */
	unsigned int z_size;		/* zone size (units per zsize in dev_info) */
	sector_t z_start;		/* first sector of this zone */
	sector_t z_wp;			/* zone write pointer position */
};
282
/*
 * Per logical unit (simulated device) state. Owned by a sdebug_host_info
 * (see sdbg_host) which keeps these on its dev_info_list.
 */
struct sdebug_dev_info {
	struct list_head dev_list;	/* presumably node in sdbg_host->dev_info_list */
	unsigned int channel;		/* SCSI nexus: channel number */
	unsigned int target;		/* SCSI nexus: target id */
	u64 lun;			/* SCSI nexus: logical unit number */
	uuid_t lu_name;			/* logical unit name (uuid) */
	struct sdebug_host_info *sdbg_host;	/* owning simulated host */
	unsigned long uas_bm[1];	/* pending unit attentions; bits are SDEBUG_UA_* */
	atomic_t num_in_q;		/* commands currently queued on this LU */
	atomic_t stopped;
	bool used;

	/* For ZBC devices */
	unsigned int zsize;		/* zone size */
	unsigned int zsize_shift;	/* NOTE(review): presumably log2(zsize) - verify */
	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_imp_open;	/* zones in ZC2_IMPLICIT_OPEN condition */
	unsigned int nr_exp_open;	/* zones in ZC3_EXPLICIT_OPEN condition */
	unsigned int nr_closed;		/* zones in ZC4_CLOSED condition */
	unsigned int max_open;		/* limit on open zones (DEF_ZBC_MAX_OPEN_ZONES) */
	struct sdeb_zone_state *zstate;	/* per-zone state array */
};
305
/* One instance per simulated SCSI host */
struct sdebug_host_info {
	struct list_head host_list;	/* presumably node in a driver-wide host list */
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;	/* associated mid-level host object */
	struct device dev;		/* embedded device; see to_sdebug_host() */
	struct list_head dev_info_list;	/* this host's sdebug_dev_info objects */
};
313
/* There is an xarray of pointers to this struct's objects, one per host.
 * Hosts are looked up by si_idx (see sdebug_host_info). */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
321
/* Map an embedded struct device (->dev) back to its sdebug_host_info */
#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
324
/* Mechanism used to defer a command response; see struct sdebug_defer */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};
327
/*
 * Deferred-response bookkeeping for one queued command. Completion can be
 * driven by hrtimer (hrt) or workqueue (ew); defer_t records which.
 */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu the command was submitted on */
	bool init_hrt;	/* presumably: hrt has been initialized */
	bool init_wq;	/* presumably: ew has been initialized */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
339
/*
 * One slot of a sdebug_queue's qc_arr[]. The inj_* bits mark which error
 * injection (cf. the SDEBUG_OPT_* flags) applies to this command.
 */
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred completion state, if any */
	struct scsi_cmnd *a_cmnd;	/* the mid-level command being serviced */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
	unsigned int inj_cmd_abort:1;
};
354
/*
 * A submit queue: up to SDEBUG_CANQUEUE in-flight commands, with in_use_bm
 * tracking occupied qc_arr slots (presumably under qc_lock).
 */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};
361
/* Driver-wide counters; atomic_t because they are bumped from multiple
 * submission/completion contexts concurrently. */
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
366
/*
 * Dispatch entry for one SCSI opcode (or opcode + service action): how to
 * validate the cdb (len_mask) and which response function (pfp) handles it.
 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
378
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes.
 * These values index opcode_info_arr[]; opcode_ind_arr[] below supplies the
 * opcode-byte to SDEB_I_* mapping. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
415
416
/* Maps cdb byte 0 (the SCSI opcode) to a SDEB_I_* index; unlisted opcodes
 * map to 0 == SDEB_I_INVALID_OPCODE. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
461
462 /*
463  * The following "response" functions return the SCSI mid-level's 4 byte
464  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
465  * command completion, they can mask their return value with
466  * SDEG_RES_IMMED_MASK .
467  */
468 #define SDEG_RES_IMMED_MASK 0x40000000
469
470 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499
500 static int sdebug_do_add_host(bool mk_new_store);
501 static int sdebug_add_host_helper(int per_host_idx);
502 static void sdebug_do_remove_host(bool the_end);
503 static int sdebug_add_store(void);
504 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
505 static void sdebug_erase_all_stores(bool apart_from_first);
506
507 /*
508  * The following are overflow arrays for cdbs that "hit" the same index in
509  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
510  * should be placed in opcode_info_arr[], the others should be placed here.
511  */
/* Overflow entry for MODE SENSE(6); MODE SENSE(10) is the primary */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
516
/* Overflow entry for MODE SELECT(6); MODE SELECT(10) is the primary */
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
521
/* Overflow entries for READ(10)/(6)/(12); READ(16) is the primary */
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
532
/* Overflow entries for WRITE(10)/(6)/(12); WRITE(16) is the primary */
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
544
/* Overflow entry for VERIFY(10); VERIFY(16) is the primary */
static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};
550
/* Overflow entry for SA_IN(16) service actions beyond READ CAPACITY(16) */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};
556
/* Overflow entries for VARIABLE LENGTH (0x7f) service actions */
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
565
/* Overflow entries for MAINTENANCE IN (0xa3) service actions */
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
574
/* Overflow entry for WRITE SAME(16); WRITE SAME(10) is the primary */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
580
/* Overflow entry for RESERVE(6); RESERVE(10) is the primary */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
585
/* Overflow entry for RELEASE(6); RELEASE(10) is the primary */
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
590
/* Overflow entry for SYNCHRONIZE CACHE(16); the (10) form is the primary */
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
596
/* Overflow entry for PRE-FETCH(16); the (10) form is the primary */
static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | F_M_ACCESS, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};
602
/* Overflow entries for ZONE OUT(16) (0x94) service actions */
static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};
614
/* Overflow entries for ZONE IN(16) (0x95) service actions.
 * NOTE(review): REPORT ZONES has a NULL response function here - verify
 * whether it is handled via the primary entry. */
static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
620
621
622 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
623  * plus the terminating elements for logic that scans this table such as
624  * REPORT SUPPORTED OPERATION CODES. */
625 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
626 /* 0 */
627         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,    /* unknown opcodes */
628             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
629         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
630             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
631         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
632             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
633              0, 0} },                                   /* REPORT LUNS */
634         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
635             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
636         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
637             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
638 /* 5 */
639         {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,      /* MODE SENSE(10) */
640             resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
641                 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
642         {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,    /* MODE SELECT(10) */
643             resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
644                 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,      /* LOG SENSE */
646             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
647              0, 0, 0} },
648         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
649             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
650              0, 0} },
651         {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
652             resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
653             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
654 /* 10 */
655         {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
656             resp_write_dt0, write_iarr,                 /* WRITE(16) */
657                 {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
658                  0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
659         {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
660             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
661         {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
662             resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
663                 {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
664                  0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
665         {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
666             NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
667             0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
668         {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
669             resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
670                 maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
671                                 0xff, 0, 0xc7, 0, 0, 0, 0} },
672 /* 15 */
673         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
674             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
675         {ARRAY_SIZE(verify_iarr), 0x8f, 0,
676             F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,   /* VERIFY(16) */
677             verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
678                           0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
679         {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
680             resp_read_dt0, vl_iarr,     /* VARIABLE LENGTH, READ(32) */
681             {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
682              0xff, 0xff} },
683         {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
684             NULL, reserve_iarr, /* RESERVE(10) <no response function> */
685             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
686              0} },
687         {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
688             NULL, release_iarr, /* RELEASE(10) <no response function> */
689             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
690              0} },
691 /* 20 */
692         {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
693             {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
694         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
695             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
696         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
697             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
698         {0, 0x1d, F_D_OUT, 0, NULL, NULL,       /* SEND DIAGNOSTIC */
699             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
700         {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
701             {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
702 /* 25 */
703         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
704             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
705              0, 0, 0, 0} },                     /* WRITE_BUFFER */
706         {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
707             resp_write_same_10, write_same_iarr,        /* WRITE SAME(10) */
708                 {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
709                  0, 0, 0, 0, 0} },
710         {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
711             resp_sync_cache, sync_cache_iarr,
712             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
713              0, 0, 0, 0} },                     /* SYNC_CACHE (10) */
714         {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
715             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
716              0, 0xff, 0x3f, 0xc7} },            /* COMPARE AND WRITE */
717         {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | F_M_ACCESS,
718             resp_pre_fetch, pre_fetch_iarr,
719             {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
720              0, 0, 0, 0} },                     /* PRE-FETCH (10) */
721
722 /* 30 */
723         {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW,
724             resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
725                 {16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
726                  0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
727         {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_D_IN,
728             resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
729                 {16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
730                  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
731 /* sentinel */
732         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
733             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
734 };
735
/*
 * Working copies of the module parameters.  Set at load time from the
 * DEF_* defaults (or the user-supplied values); many are also adjustable
 * afterwards via sysfs.  Read throughout the command-handling paths.
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;	/* non-zero -> descriptor sense format */
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW; /* non-zero -> skip media transfers */
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;		/* enables the sdev_printk tracing */
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
793
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);	/* guards sdebug_host_list walks */

/* xarray of backing stores; index 0 is used as the fallback store */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
/* abort/reset event counters and DIF/DIX activity counters */
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;
821
/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true when ptype=TYPE_ZBC [0x14] */
static const int zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;	/* prefix used in log messages */

static struct bus_type pseudo_lld_bus;

/* pseudo low-level driver registered on the pseudo bus above */
static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Precomputed scsi_cmnd result words for the common command outcomes */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
855
856
857 /* Only do the extra work involved in logical block provisioning if one or
858  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
859  * real reads and writes (i.e. not skipping them for speed).
860  */
861 static inline bool scsi_debug_lbp(void)
862 {
863         return 0 == sdebug_fake_rw &&
864                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
865 }
866
867 static void *lba2fake_store(struct sdeb_store_info *sip,
868                             unsigned long long lba)
869 {
870         struct sdeb_store_info *lsip = sip;
871
872         lba = do_div(lba, sdebug_store_sectors);
873         if (!sip || !sip->storep) {
874                 WARN_ON_ONCE(true);
875                 lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
876         }
877         return lsip->storep + lba * sdebug_sector_size;
878 }
879
880 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
881                                       sector_t sector)
882 {
883         sector = sector_div(sector, sdebug_store_sectors);
884
885         return sip->dif_storep + sector;
886 }
887
888 static void sdebug_max_tgts_luns(void)
889 {
890         struct sdebug_host_info *sdbg_host;
891         struct Scsi_Host *hpnt;
892
893         spin_lock(&sdebug_host_list_lock);
894         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
895                 hpnt = sdbg_host->shost;
896                 if ((hpnt->this_id >= 0) &&
897                     (sdebug_num_tgts > hpnt->this_id))
898                         hpnt->max_id = sdebug_num_tgts + 1;
899                 else
900                         hpnt->max_id = sdebug_num_tgts;
901                 /* sdebug_max_luns; */
902                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
903         }
904         spin_unlock(&sdebug_host_list_lock);
905 }
906
/* Says whether an invalid-field error is in the data-out buffer
 * (parameter list) or in the CDB itself; selects the ASC reported. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
908
/*
 * Build ILLEGAL REQUEST sense data that names the offending field via the
 * sense-key-specific (field pointer) bytes.
 * Set in_bit to -1 to indicate no bit position of invalid field.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key-specific bytes being assembled */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	/* error in the CDB vs error in the data-out (parameter list) buffer */
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;			/* SKSV: field pointer is valid */
	if (c_d)
		sks[0] |= 0x40;		/* C/D: points into the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;		/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;	/* bit position of the bad field */
	}
	put_unaligned_be16(in_byte, sks + 1);	/* byte (field) pointer */
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key-specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;		/* grow additional sense length */
		sbuff[sl] = 0x2;	/* descriptor type: sense key specific */
		sbuff[sl + 1] = 0x6;	/* additional length of descriptor */
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
949
950 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
951 {
952         unsigned char *sbuff;
953
954         sbuff = scp->sense_buffer;
955         if (!sbuff) {
956                 sdev_printk(KERN_ERR, scp->device,
957                             "%s: sense_buffer is NULL\n", __func__);
958                 return;
959         }
960         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
961
962         scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
963
964         if (sdebug_verbose)
965                 sdev_printk(KERN_INFO, scp->device,
966                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
967                             my_name, key, asc, asq);
968 }
969
/* Set sense data to ILLEGAL REQUEST, INVALID COMMAND OPERATION CODE. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
974
975 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
976                             void __user *arg)
977 {
978         if (sdebug_verbose) {
979                 if (0x1261 == cmd)
980                         sdev_printk(KERN_INFO, dev,
981                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
982                 else if (0x5331 == cmd)
983                         sdev_printk(KERN_INFO, dev,
984                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
985                                     __func__);
986                 else
987                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
988                                     __func__, cmd);
989         }
990         return -EINVAL;
991         /* return -ENOTTY; // correct return but upsets fdisk */
992 }
993
994 static void config_cdb_len(struct scsi_device *sdev)
995 {
996         switch (sdebug_cdb_len) {
997         case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
998                 sdev->use_10_for_rw = false;
999                 sdev->use_16_for_rw = false;
1000                 sdev->use_10_for_ms = false;
1001                 break;
1002         case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1003                 sdev->use_10_for_rw = true;
1004                 sdev->use_16_for_rw = false;
1005                 sdev->use_10_for_ms = false;
1006                 break;
1007         case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1008                 sdev->use_10_for_rw = true;
1009                 sdev->use_16_for_rw = false;
1010                 sdev->use_10_for_ms = true;
1011                 break;
1012         case 16:
1013                 sdev->use_10_for_rw = false;
1014                 sdev->use_16_for_rw = true;
1015                 sdev->use_10_for_ms = true;
1016                 break;
1017         case 32: /* No knobs to suggest this so same as 16 for now */
1018                 sdev->use_10_for_rw = false;
1019                 sdev->use_16_for_rw = true;
1020                 sdev->use_10_for_ms = true;
1021                 break;
1022         default:
1023                 pr_warn("unexpected cdb_len=%d, force to 10\n",
1024                         sdebug_cdb_len);
1025                 sdev->use_10_for_rw = true;
1026                 sdev->use_16_for_rw = false;
1027                 sdev->use_10_for_ms = false;
1028                 sdebug_cdb_len = 10;
1029                 break;
1030         }
1031 }
1032
1033 static void all_config_cdb_len(void)
1034 {
1035         struct sdebug_host_info *sdbg_host;
1036         struct Scsi_Host *shost;
1037         struct scsi_device *sdev;
1038
1039         spin_lock(&sdebug_host_list_lock);
1040         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1041                 shost = sdbg_host->shost;
1042                 shost_for_each_device(sdev, shost) {
1043                         config_cdb_len(sdev);
1044                 }
1045         }
1046         spin_unlock(&sdebug_host_list_lock);
1047 }
1048
1049 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1050 {
1051         struct sdebug_host_info *sdhp;
1052         struct sdebug_dev_info *dp;
1053
1054         spin_lock(&sdebug_host_list_lock);
1055         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1056                 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1057                         if ((devip->sdbg_host == dp->sdbg_host) &&
1058                             (devip->target == dp->target))
1059                                 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1060                 }
1061         }
1062         spin_unlock(&sdebug_host_list_lock);
1063 }
1064
/*
 * Report any pending unit attention (UA) for this device.  Picks the
 * lowest-numbered pending UA from devip->uas_bm, builds the matching
 * sense data, clears that UA bit and returns check_condition_result;
 * returns 0 when no UA is pending.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, only set when verbose */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1144
1145 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1146 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1147                                 int arr_len)
1148 {
1149         int act_len;
1150         struct scsi_data_buffer *sdb = &scp->sdb;
1151
1152         if (!sdb->length)
1153                 return 0;
1154         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1155                 return DID_ERROR << 16;
1156
1157         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1158                                       arr, arr_len);
1159         scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1160
1161         return 0;
1162 }
1163
1164 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1165  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1166  * calls, not required to write in ascending offset order. Assumes resid
1167  * set to scsi_bufflen() prior to any calls.
1168  */
1169 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1170                                   int arr_len, unsigned int off_dst)
1171 {
1172         unsigned int act_len, n;
1173         struct scsi_data_buffer *sdb = &scp->sdb;
1174         off_t skip = off_dst;
1175
1176         if (sdb->length <= off_dst)
1177                 return 0;
1178         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1179                 return DID_ERROR << 16;
1180
1181         act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1182                                        arr, arr_len, skip);
1183         pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1184                  __func__, off_dst, scsi_bufflen(scp), act_len,
1185                  scsi_get_resid(scp));
1186         n = scsi_bufflen(scp) - (off_dst + act_len);
1187         scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1188         return 0;
1189 }
1190
1191 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1192  * 'arr' or -1 if error.
1193  */
1194 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1195                                int arr_len)
1196 {
1197         if (!scsi_bufflen(scp))
1198                 return 0;
1199         if (scp->sc_data_direction != DMA_TO_DEVICE)
1200                 return -1;
1201
1202         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1203 }
1204
1205
/* INQUIRY identification strings, space-padded to the standard INQUIRY
 * field widths (8, 16 and 4 bytes) plus a trailing NUL. */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1213
/* Device identification VPD page. Returns number of bytes placed in arr.
 * Emits a T10 vendor-id designator, then (when dev_id_num >= 0) a logical
 * unit designator (UUID or NAA-3, per sdebug_uuid_ctl) and a relative
 * target port designator, followed by target port, port group, target
 * device and SCSI name string designators.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;		/* designator length */
	num += 4;		/* account for the 4-byte designator header */
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;	/* designator length: 18 */
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;	/* designator length: 8 */
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* designator length */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* NUL pad out to 24 bytes */
	num += 4;
	return num;
}
1301
/* Canned payload served by inquiry_vpd_84() from the page's 4th byte
 * onward: three 6-byte identifier entries. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1307
1308 /*  Software interface identification VPD page */
1309 static int inquiry_vpd_84(unsigned char *arr)
1310 {
1311         memcpy(arr, vpd84_data, sizeof(vpd84_data));
1312         return sizeof(vpd84_data);
1313 }
1314
/* Append one network address descriptor to arr: a 4-byte header followed
 * by the NUL-terminated address string, zero-padded to a multiple of 4
 * bytes.  Returns the number of bytes written.
 */
static int inquiry_vpd_85_add_na(unsigned char *arr, int assoc_svc_type,
				 const char *na)
{
	int num = 0;
	int olen = strlen(na);
	int plen = olen + 1;	/* include the NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad up to 4-byte multiple */
	arr[num++] = assoc_svc_type;	/* association + service type */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page. Returns number of bytes placed
 * in arr: two descriptors (storage configuration and logging URLs).
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu, storage config */
	num += inquiry_vpd_85_add_na(arr + num, 0x1,
				     "https://www.kernel.org/config");
	/* lu, logging */
	num += inquiry_vpd_85_add_na(arr + num, 0x4,
				     "http://www.kernel.org/log");
	return num;
}
1349
1350 /* SCSI ports VPD page */
1351 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1352 {
1353         int num = 0;
1354         int port_a, port_b;
1355
1356         port_a = target_dev_id + 1;
1357         port_b = port_a + 1;
1358         arr[num++] = 0x0;       /* reserved */
1359         arr[num++] = 0x0;       /* reserved */
1360         arr[num++] = 0x0;
1361         arr[num++] = 0x1;       /* relative port 1 (primary) */
1362         memset(arr + num, 0, 6);
1363         num += 6;
1364         arr[num++] = 0x0;
1365         arr[num++] = 12;        /* length tp descriptor */
1366         /* naa-5 target port identifier (A) */
1367         arr[num++] = 0x61;      /* proto=sas, binary */
1368         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1369         arr[num++] = 0x0;       /* reserved */
1370         arr[num++] = 0x8;       /* length */
1371         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1372         num += 8;
1373         arr[num++] = 0x0;       /* reserved */
1374         arr[num++] = 0x0;       /* reserved */
1375         arr[num++] = 0x0;
1376         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1377         memset(arr + num, 0, 6);
1378         num += 6;
1379         arr[num++] = 0x0;
1380         arr[num++] = 12;        /* length tp descriptor */
1381         /* naa-5 target port identifier (B) */
1382         arr[num++] = 0x61;      /* proto=sas, binary */
1383         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1384         arr[num++] = 0x0;       /* reserved */
1385         arr[num++] = 0x8;       /* length */
1386         put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1387         num += 8;
1388
1389         return num;
1390 }
1391
1392
/*
 * Canned payload for the ATA Information VPD page (0x89, SAT).
 * Bytes 4..7 are filled by the caller's framing; the bulk of this table
 * looks like a captured ATA IDENTIFY DEVICE response (512 bytes) with
 * SAT vendor/product header fields up front — treated as an opaque
 * blob, returned verbatim by inquiry_vpd_89().
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1436
1437 /* ATA Information VPD page */
1438 static int inquiry_vpd_89(unsigned char *arr)
1439 {
1440         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1441         return sizeof(vpd89_data);
1442 }
1443
1444
/*
 * Template for the Block Limits VPD page (0xb0), starting at the page's
 * 4th byte (offsets here are page offset minus 4). inquiry_vpd_b0()
 * copies this then overwrites most fields from the sdebug_* parameters;
 * the non-zero bytes presumably seed defaults (e.g. maximum compare and
 * write length) — verify against SBC-3 table offsets before relying on
 * them.
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1451
1452 /* Block limits VPD page (SBC-3) */
1453 static int inquiry_vpd_b0(unsigned char *arr)
1454 {
1455         unsigned int gran;
1456
1457         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1458
1459         /* Optimal transfer length granularity */
1460         if (sdebug_opt_xferlen_exp != 0 &&
1461             sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1462                 gran = 1 << sdebug_opt_xferlen_exp;
1463         else
1464                 gran = 1 << sdebug_physblk_exp;
1465         put_unaligned_be16(gran, arr + 2);
1466
1467         /* Maximum Transfer Length */
1468         if (sdebug_store_sectors > 0x400)
1469                 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1470
1471         /* Optimal Transfer Length */
1472         put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1473
1474         if (sdebug_lbpu) {
1475                 /* Maximum Unmap LBA Count */
1476                 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1477
1478                 /* Maximum Unmap Block Descriptor Count */
1479                 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1480         }
1481
1482         /* Unmap Granularity Alignment */
1483         if (sdebug_unmap_alignment) {
1484                 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1485                 arr[28] |= 0x80; /* UGAVALID */
1486         }
1487
1488         /* Optimal Unmap Granularity */
1489         put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1490
1491         /* Maximum WRITE SAME Length */
1492         put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1493
1494         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1495
1496         return sizeof(vpdb0_data);
1497 }
1498
1499 /* Block device characteristics VPD page (SBC-3) */
/* Block device characteristics VPD page (SBC-3, page 0xb1).
 * Reports a non-rotating (solid state) medium in a sub-1.8" form
 * factor; all other fields stay zero. Returns the page length. */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	/* bytes 0 and 2 remain 0 from the memset */
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
1510
1511 /* Logical block provisioning VPD page (SBC-4) */
1512 static int inquiry_vpd_b2(unsigned char *arr)
1513 {
1514         memset(arr, 0, 0x4);
1515         arr[0] = 0;                     /* threshold exponent */
1516         if (sdebug_lbpu)
1517                 arr[1] = 1 << 7;
1518         if (sdebug_lbpws)
1519                 arr[1] |= 1 << 6;
1520         if (sdebug_lbpws10)
1521                 arr[1] |= 1 << 5;
1522         if (sdebug_lbprz && scsi_debug_lbp())
1523                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1524         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1525         /* minimum_percentage=0; provisioning_type=0 (unknown) */
1526         /* threshold_percentage=0 */
1527         return 0x4;
1528 }
1529
1530 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1531 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1532 {
1533         memset(arr, 0, 0x3c);
1534         arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1535         /*
1536          * Set Optimal number of open sequential write preferred zones and
1537          * Optimal number of non-sequentially written sequential write
1538          * preferred zones fields to 'not reported' (0xffffffff). Leave other
1539          * fields set to zero, apart from Max. number of open swrz_s field.
1540          */
1541         put_unaligned_be32(0xffffffff, &arr[4]);
1542         put_unaligned_be32(0xffffffff, &arr[8]);
1543         if (devip->max_open)
1544                 put_unaligned_be32(devip->max_open, &arr[12]);
1545         else
1546                 put_unaligned_be32(0xffffffff, &arr[12]);
1547         return 0x3c;
1548 }
1549
1550 #define SDEBUG_LONG_INQ_SZ 96
1551 #define SDEBUG_MAX_INQ_ARR_SZ 584
1552
/*
 * Respond to the INQUIRY command (SPC). With EVPD clear, build the
 * standard 96-byte inquiry response; with EVPD set, dispatch on cmd[2]
 * (the page code) to the inquiry_vpd_* helpers. CMDDT (obsolete) is
 * rejected with ILLEGAL REQUEST. Returns 0, check_condition_result, or
 * DID_REQUEUE << 16 if the response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (sdebug_ptype == TYPE_ZBC);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	/* byte 0: peripheral qualifier (hi 3 bits) + device type (lo 5) */
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		/* synthesize stable ids from host/channel/target/lun */
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				else if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;   /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			arr[5] = 0x7;	/* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;  /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];	/*sanity */
			n = inquiry_vpd_89(&arr[4]);
			/* page 0x89 uses a 16-bit page length field */
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clip to allocation length and buffer capacity */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1714
/* Informational exceptions control mode page (0x1c) current values.
 * resp_requests() reads byte 2 bit 2 (TEST) and byte 3 low nibble (MRIE)
 * to decide whether to fake a "threshold exceeded" sense response;
 * presumably these bits get set via MODE SELECT elsewhere — TEST is 0
 * in this initial image. */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1717
1718 static int resp_requests(struct scsi_cmnd *scp,
1719                          struct sdebug_dev_info *devip)
1720 {
1721         unsigned char *sbuff;
1722         unsigned char *cmd = scp->cmnd;
1723         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1724         bool dsense;
1725         int len = 18;
1726
1727         memset(arr, 0, sizeof(arr));
1728         dsense = !!(cmd[1] & 1);
1729         sbuff = scp->sense_buffer;
1730         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1731                 if (dsense) {
1732                         arr[0] = 0x72;
1733                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1734                         arr[2] = THRESHOLD_EXCEEDED;
1735                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1736                         len = 8;
1737                 } else {
1738                         arr[0] = 0x70;
1739                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1740                         arr[7] = 0xa;           /* 18 byte sense buffer */
1741                         arr[12] = THRESHOLD_EXCEEDED;
1742                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1743                 }
1744         } else {
1745                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1746                 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1747                         ;       /* have sense and formats match */
1748                 else if (arr[0] <= 0x70) {
1749                         if (dsense) {
1750                                 memset(arr, 0, 8);
1751                                 arr[0] = 0x72;
1752                                 len = 8;
1753                         } else {
1754                                 memset(arr, 0, 18);
1755                                 arr[0] = 0x70;
1756                                 arr[7] = 0xa;
1757                         }
1758                 } else if (dsense) {
1759                         memset(arr, 0, 8);
1760                         arr[0] = 0x72;
1761                         arr[1] = sbuff[2];     /* sense key */
1762                         arr[2] = sbuff[12];    /* asc */
1763                         arr[3] = sbuff[13];    /* ascq */
1764                         len = 8;
1765                 } else {
1766                         memset(arr, 0, 18);
1767                         arr[0] = 0x70;
1768                         arr[2] = sbuff[1];
1769                         arr[7] = 0xa;
1770                         arr[12] = sbuff[1];
1771                         arr[13] = sbuff[3];
1772                 }
1773
1774         }
1775         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1776         return fill_from_dev_buffer(scp, arr, len);
1777 }
1778
1779 static int resp_start_stop(struct scsi_cmnd *scp,
1780                            struct sdebug_dev_info *devip)
1781 {
1782         unsigned char *cmd = scp->cmnd;
1783         int power_cond, stop;
1784         bool changing;
1785
1786         power_cond = (cmd[4] & 0xf0) >> 4;
1787         if (power_cond) {
1788                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1789                 return check_condition_result;
1790         }
1791         stop = !(cmd[4] & 1);
1792         changing = atomic_read(&devip->stopped) == !stop;
1793         atomic_xchg(&devip->stopped, stop);
1794         if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1795                 return SDEG_RES_IMMED_MASK;
1796         else
1797                 return 0;
1798 }
1799
1800 static sector_t get_sdebug_capacity(void)
1801 {
1802         static const unsigned int gibibyte = 1073741824;
1803
1804         if (sdebug_virtual_gb > 0)
1805                 return (sector_t)sdebug_virtual_gb *
1806                         (gibibyte / sdebug_sector_size);
1807         else
1808                 return sdebug_store_sectors;
1809 }
1810
1811 #define SDEBUG_READCAP_ARR_SZ 8
1812 static int resp_readcap(struct scsi_cmnd *scp,
1813                         struct sdebug_dev_info *devip)
1814 {
1815         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1816         unsigned int capac;
1817
1818         /* following just in case virtual_gb changed */
1819         sdebug_capacity = get_sdebug_capacity();
1820         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1821         if (sdebug_capacity < 0xffffffff) {
1822                 capac = (unsigned int)sdebug_capacity - 1;
1823                 put_unaligned_be32(capac, arr + 0);
1824         } else
1825                 put_unaligned_be32(0xffffffff, arr + 0);
1826         put_unaligned_be16(sdebug_sector_size, arr + 6);
1827         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1828 }
1829
1830 #define SDEBUG_READCAP16_ARR_SZ 32
1831 static int resp_readcap16(struct scsi_cmnd *scp,
1832                           struct sdebug_dev_info *devip)
1833 {
1834         unsigned char *cmd = scp->cmnd;
1835         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1836         int alloc_len;
1837
1838         alloc_len = get_unaligned_be32(cmd + 10);
1839         /* following just in case virtual_gb changed */
1840         sdebug_capacity = get_sdebug_capacity();
1841         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1842         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1843         put_unaligned_be32(sdebug_sector_size, arr + 8);
1844         arr[13] = sdebug_physblk_exp & 0xf;
1845         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1846
1847         if (scsi_debug_lbp()) {
1848                 arr[14] |= 0x80; /* LBPME */
1849                 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1850                  * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1851                  * in the wider field maps to 0 in this field.
1852                  */
1853                 if (sdebug_lbprz & 1)   /* precisely what the draft requires */
1854                         arr[14] |= 0x40;
1855         }
1856
1857         arr[15] = sdebug_lowest_aligned & 0xff;
1858
1859         if (have_dif_prot) {
1860                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1861                 arr[12] |= 1; /* PROT_EN */
1862         }
1863
1864         return fill_from_dev_buffer(scp, arr,
1865                             min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1866 }
1867
1868 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1869
/*
 * REPORT TARGET PORT GROUPS (MAINTENANCE IN). Builds two single-port
 * target port groups to match the two ports advertised by VPD page
 * 0x88: group A holds the "real" port, group B a dangling port reported
 * as unavailable. Returns 0 or DID_REQUEUE << 16 on allocation failure.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;	/* skip the 4-byte header; length filled in below */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes the header itself */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min_t(int, alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1945
/*
 * REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, sa 0xc). Walks
 * opcode_info_arr, which for each primary opcode may carry attached
 * entries sharing the opcode but differing in service action.
 * REPORTING OPTIONS 0 lists every supported command (with optional
 * command timeouts descriptors when RCTD is set); options 1-3 describe
 * a single opcode (optionally qualified by service action) and return
 * its CDB usage mask.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;		/* return command timeouts descriptor */
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	/* cap the working buffer at 8k regardless of allocation length */
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0: /* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* 8 bytes per descriptor, plus 12 for timeouts if RCTD */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			/* save position; the inner loop re-points oip at
			 * the attached (same opcode, different sa) entries */
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore outer-loop cursor */
			offset += bump;
		}
		break;
	case 1: /* one command: opcode only */
	case 2: /* one command: opcode plus service action */
	case 3: /* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode needs a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported as standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* scan attached entries for the opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* scan attached entries for the sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the CDB usage data (len_mask) */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clip to working buffer then to allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2096
2097 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2098                           struct sdebug_dev_info *devip)
2099 {
2100         bool repd;
2101         u32 alloc_len, len;
2102         u8 arr[16];
2103         u8 *cmd = scp->cmnd;
2104
2105         memset(arr, 0, sizeof(arr));
2106         repd = !!(cmd[2] & 0x80);
2107         alloc_len = get_unaligned_be32(cmd + 6);
2108         if (alloc_len < 4) {
2109                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2110                 return check_condition_result;
2111         }
2112         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
2113         arr[1] = 0x1;           /* ITNRS */
2114         if (repd) {
2115                 arr[3] = 0xc;
2116                 len = 16;
2117         } else
2118                 len = 4;
2119
2120         len = (len < alloc_len) ? len : alloc_len;
2121         return fill_from_dev_buffer(scp, arr, len);
2122 }
2123
2124 /* <<Following mode page info copied from ST318451LW>> */
2125
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0,
						     0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
2136
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128,
						      0, 10, 0, 0,
						      0, 0, 0, 0,
						      0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2147
2148 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2149 {       /* Format device page for mode_sense */
2150         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2151                                      0, 0, 0, 0, 0, 0, 0, 0,
2152                                      0, 0, 0, 0, 0x40, 0, 0, 0};
2153
2154         memcpy(p, format_pg, sizeof(format_pg));
2155         put_unaligned_be16(sdebug_sectors_per, p + 10);
2156         put_unaligned_be16(sdebug_sector_size, p + 12);
2157         if (sdebug_removable)
2158                 p[20] |= 0x20; /* should agree with INQUIRY */
2159         if (1 == pcontrol)
2160                 memset(p + 2, 0, sizeof(format_pg) - 2);
2161         return sizeof(format_pg);
2162 }
2163
/* Caching mode page (0x8) current values; writable via MODE SELECT */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2167
2168 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2169 {       /* Caching page for mode_sense */
2170         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2171                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2172         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2173                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2174
2175         if (SDEBUG_OPT_N_WCE & sdebug_opts)
2176                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
2177         memcpy(p, caching_pg, sizeof(caching_pg));
2178         if (1 == pcontrol)
2179                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2180         else if (2 == pcontrol)
2181                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2182         return sizeof(caching_pg);
2183 }
2184
/* Control mode page (0xa) current values; writable via MODE SELECT */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2187
2188 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2189 {       /* Control mode page for mode_sense */
2190         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2191                                         0, 0, 0, 0};
2192         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2193                                      0, 0, 0x2, 0x4b};
2194
2195         if (sdebug_dsense)
2196                 ctrl_m_pg[2] |= 0x4;
2197         else
2198                 ctrl_m_pg[2] &= ~0x4;
2199
2200         if (sdebug_ato)
2201                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2202
2203         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2204         if (1 == pcontrol)
2205                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2206         else if (2 == pcontrol)
2207                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2208         return sizeof(ctrl_m_pg);
2209 }
2210
2211
2212 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2213 {       /* Informational Exceptions control mode page for mode_sense */
2214         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2215                                        0, 0, 0x0, 0x0};
2216         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2217                                       0, 0, 0x0, 0x0};
2218
2219         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2220         if (1 == pcontrol)
2221                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2222         else if (2 == pcontrol)
2223                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2224         return sizeof(iec_m_pg);
2225 }
2226
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	static const unsigned char sas_sf_m_pg[] = {0x19, 0x6, 0x6, 0x0,
						    0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2237
2238
2239 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2240                               int target_dev_id)
2241 {       /* SAS phy control and discover mode page for mode_sense */
2242         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2243                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2244                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2245                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2246                     0x2, 0, 0, 0, 0, 0, 0, 0,
2247                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2248                     0, 0, 0, 0, 0, 0, 0, 0,
2249                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2250                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2251                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2252                     0x3, 0, 0, 0, 0, 0, 0, 0,
2253                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2254                     0, 0, 0, 0, 0, 0, 0, 0,
2255                 };
2256         int port_a, port_b;
2257
2258         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2259         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2260         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2261         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2262         port_a = target_dev_id + 1;
2263         port_b = port_a + 1;
2264         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2265         put_unaligned_be32(port_a, p + 20);
2266         put_unaligned_be32(port_b, p + 48 + 20);
2267         if (1 == pcontrol)
2268                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2269         return sizeof(sas_pcd_m_pg);
2270 }
2271
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc,
						     0, 0x6, 0x10, 0,
						     0, 0, 0, 0,
						     0, 0, 0, 0};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2283
2284 #define SDEBUG_MAX_MSENSE_SZ 256
2285
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10).
 * Builds the mode parameter header, optional block descriptor(s) and the
 * requested mode page(s) into arr[], then copies at most alloc_len bytes
 * into the command's data-in buffer.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;		/* append pointer into arr[] */
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6; /* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* requested mode page code */
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	/* LLBAA exists only in the 10-byte CDB */
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (sdebug_ptype == TYPE_ZBC);
	/* block descriptors only for disk-like types, unless disabled */
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes for msense(6), 8 for msense(10) */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short descriptor: 32-bit block count saturates at ~0 */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		/* long LBA descriptor: full 64-bit block count */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* subpages are only implemented for page 0x19 (and 0x3f below) */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:	/* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		/* subpage 0xff means "all subpages of this page" */
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* fill in MODE DATA LENGTH (excludes the length field itself) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
2449
2450 #define SDEBUG_MAX_MSELECT_SZ 512
2451
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10).
 * Fetches the parameter list from the initiator and applies it to the
 * caching (0x8), control (0xa) or informational-exceptions (0x1c) mode
 * page; a successful change raises a MODE PARAMETERS CHANGED unit
 * attention on the device.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* Page Format bit */
	sp = cmd[1] & 0x1;	/* Save Pages bit (not supported) */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	/* require PF=1, SP=0 and a parameter list that fits our buffer */
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/*
	 * MODE DATA LENGTH must be zero in the sent parameter list, which
	 * makes md_len 1 (mselect6) or 2 here; anything larger is rejected.
	 */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* skip the mode parameter header and any block descriptors */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit must be zero when sent */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SPF: sub-page format page header */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		/* page length byte must match before accepting the data */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* byte 4 bit 0x8 toggles the emulated write protect */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			/* D_SENSE selects descriptor-format sense data */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	/* unsupported page, or page length mismatch above */
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2533
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page (0xd) payload for LOG SENSE */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* parameter 0: temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* parameter 1: reference */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2543
2544 static int resp_ie_l_pg(unsigned char *arr)
2545 {
2546         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2547                 };
2548
2549         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2550         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2551                 arr[4] = THRESHOLD_EXCEEDED;
2552                 arr[5] = 0xff;
2553         }
2554         return sizeof(ie_l_pg);
2555 }
2556
2557 #define SDEBUG_MAX_LSENSE_SZ 512
2558
2559 static int resp_log_sense(struct scsi_cmnd *scp,
2560                           struct sdebug_dev_info *devip)
2561 {
2562         int ppc, sp, pcode, subpcode, alloc_len, len, n;
2563         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2564         unsigned char *cmd = scp->cmnd;
2565
2566         memset(arr, 0, sizeof(arr));
2567         ppc = cmd[1] & 0x2;
2568         sp = cmd[1] & 0x1;
2569         if (ppc || sp) {
2570                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2571                 return check_condition_result;
2572         }
2573         pcode = cmd[2] & 0x3f;
2574         subpcode = cmd[3] & 0xff;
2575         alloc_len = get_unaligned_be16(cmd + 7);
2576         arr[0] = pcode;
2577         if (0 == subpcode) {
2578                 switch (pcode) {
2579                 case 0x0:       /* Supported log pages log page */
2580                         n = 4;
2581                         arr[n++] = 0x0;         /* this page */
2582                         arr[n++] = 0xd;         /* Temperature */
2583                         arr[n++] = 0x2f;        /* Informational exceptions */
2584                         arr[3] = n - 4;
2585                         break;
2586                 case 0xd:       /* Temperature log page */
2587                         arr[3] = resp_temp_l_pg(arr + 4);
2588                         break;
2589                 case 0x2f:      /* Informational exceptions log page */
2590                         arr[3] = resp_ie_l_pg(arr + 4);
2591                         break;
2592                 default:
2593                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2594                         return check_condition_result;
2595                 }
2596         } else if (0xff == subpcode) {
2597                 arr[0] |= 0x40;
2598                 arr[1] = subpcode;
2599                 switch (pcode) {
2600                 case 0x0:       /* Supported log pages and subpages log page */
2601                         n = 4;
2602                         arr[n++] = 0x0;
2603                         arr[n++] = 0x0;         /* 0,0 page */
2604                         arr[n++] = 0x0;
2605                         arr[n++] = 0xff;        /* this page */
2606                         arr[n++] = 0xd;
2607                         arr[n++] = 0x0;         /* Temperature */
2608                         arr[n++] = 0x2f;
2609                         arr[n++] = 0x0; /* Informational exceptions */
2610                         arr[3] = n - 4;
2611                         break;
2612                 case 0xd:       /* Temperature subpages */
2613                         n = 4;
2614                         arr[n++] = 0xd;
2615                         arr[n++] = 0x0;         /* Temperature */
2616                         arr[3] = n - 4;
2617                         break;
2618                 case 0x2f:      /* Informational exceptions subpages */
2619                         n = 4;
2620                         arr[n++] = 0x2f;
2621                         arr[n++] = 0x0;         /* Informational exceptions */
2622                         arr[3] = n - 4;
2623                         break;
2624                 default:
2625                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2626                         return check_condition_result;
2627                 }
2628         } else {
2629                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2630                 return check_condition_result;
2631         }
2632         len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2633         return fill_from_dev_buffer(scp, arr,
2634                     min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2635 }
2636
2637 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2638 {
2639         return devip->nr_zones != 0;
2640 }
2641
2642 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2643                                         unsigned long long lba)
2644 {
2645         unsigned int zno;
2646
2647         if (devip->zsize_shift)
2648                 zno = lba >> devip->zsize_shift;
2649         else
2650                 zno = lba / devip->zsize;
2651         return &devip->zstate[zno];
2652 }
2653
2654 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2655 {
2656         return zsp->z_cond == ZBC_NOT_WRITE_POINTER;
2657 }
2658
2659 static void zbc_close_zone(struct sdebug_dev_info *devip,
2660                            struct sdeb_zone_state *zsp)
2661 {
2662         enum sdebug_z_cond zc;
2663
2664         if (zbc_zone_is_conv(zsp))
2665                 return;
2666
2667         zc = zsp->z_cond;
2668         if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2669                 return;
2670
2671         if (zc == ZC2_IMPLICIT_OPEN)
2672                 devip->nr_imp_open--;
2673         else
2674                 devip->nr_exp_open--;
2675
2676         if (zsp->z_wp == zsp->z_start) {
2677                 zsp->z_cond = ZC1_EMPTY;
2678         } else {
2679                 zsp->z_cond = ZC4_CLOSED;
2680                 devip->nr_closed++;
2681         }
2682 }
2683
2684 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2685 {
2686         struct sdeb_zone_state *zsp = &devip->zstate[0];
2687         unsigned int i;
2688
2689         for (i = 0; i < devip->nr_zones; i++, zsp++) {
2690                 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2691                         zbc_close_zone(devip, zsp);
2692                         return;
2693                 }
2694         }
2695 }
2696
2697 static void zbc_open_zone(struct sdebug_dev_info *devip,
2698                           struct sdeb_zone_state *zsp, bool explicit)
2699 {
2700         enum sdebug_z_cond zc;
2701
2702         if (zbc_zone_is_conv(zsp))
2703                 return;
2704
2705         zc = zsp->z_cond;
2706         if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2707             (!explicit && zc == ZC2_IMPLICIT_OPEN))
2708                 return;
2709
2710         /* Close an implicit open zone if necessary */
2711         if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2712                 zbc_close_zone(devip, zsp);
2713         else if (devip->max_open &&
2714                  devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2715                 zbc_close_imp_open_zone(devip);
2716
2717         if (zsp->z_cond == ZC4_CLOSED)
2718                 devip->nr_closed--;
2719         if (explicit) {
2720                 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2721                 devip->nr_exp_open++;
2722         } else {
2723                 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2724                 devip->nr_imp_open++;
2725         }
2726 }
2727
2728 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2729                        unsigned long long lba, unsigned int num)
2730 {
2731         struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2732
2733         if (zbc_zone_is_conv(zsp))
2734                 return;
2735
2736         zsp->z_wp += num;
2737         if (zsp->z_wp >= zsp->z_start + zsp->z_size)
2738                 zsp->z_cond = ZC5_FULL;
2739 }
2740
/*
 * Validate a read or write of num blocks at lba against the emulated
 * zone model: reads may not span from a conventional into a sequential
 * zone; writes must stay inside one sequential zone, start exactly at
 * that zone's write pointer, and may implicitly open an empty or closed
 * zone (subject to the max_open limit). Returns 0 when the access is
 * allowed, otherwise sets sense data and returns check_condition_result.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* Reads cannot cross zone types boundaries */
		if (zsp_end != zsp &&
		    zbc_zone_is_conv(zsp) &&
		    !zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ... but the write may not spill into a sequential zone */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writes cannot cross sequential zone boundaries */
	if (zsp_end != zsp) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				LBA_OUT_OF_RANGE,
				WRITE_BOUNDARY_ASCQ);
		return check_condition_result;
	}
	/* Cannot write full zones */
	if (zsp->z_cond == ZC5_FULL) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	/* Writes must be aligned to the zone WP */
	if (lba != zsp->z_wp) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				LBA_OUT_OF_RANGE,
				UNALIGNED_WRITE_ASCQ);
		return check_condition_result;
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		/* refuse when explicitly open zones exhaust max_open */
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2808
/*
 * Common validation for media access commands: the range [lba, lba + num)
 * must lie within the device capacity, the transfer length must not exceed
 * the backing store size, and writes must respect the software
 * write-protect switch (sdebug_wp). Zoned devices additionally get the
 * ZBC checks. Returns 0 if the access may proceed, otherwise sets sense
 * data on @scp and returns check_condition_result.
 */
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
2835
2836 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip)
2837 {
2838         return sdebug_fake_rw ?
2839                         NULL : xa_load(per_store_ap, devip->sdbg_host->si_idx);
2840 }
2841
/*
 * Copy @num sectors between the scatter-gather list of @scp (starting
 * @sg_skip bytes in) and the backing store @sip, at logical block @lba.
 * The store may be smaller than the advertised capacity, so @lba is
 * reduced modulo sdebug_store_sectors and a transfer that runs off the
 * end of the store wraps around to its start (the "rest" portion).
 * Returns number of bytes copied, or -1 if the command's data direction
 * does not match the requested @do_write.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;	/* rest: sectors that wrap past the store end */
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;	/* noted by SYNCHRONIZE CACHE */
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* Nothing to move for zero-length transfers or fake_rw (no store) */
	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* do_div() divides lba in place; the remainder is the store offset */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;	/* short transfer: report what was copied */

	if (rest) {
		/* wrapped portion continues from the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2884
2885 /* Returns number of bytes copied or -1 if error. */
2886 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2887 {
2888         struct scsi_data_buffer *sdb = &scp->sdb;
2889
2890         if (!sdb->length)
2891                 return 0;
2892         if (scp->sc_data_direction != DMA_TO_DEVICE)
2893                 return -1;
2894         return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2895                               num * sdebug_sector_size, 0, true);
2896 }
2897
2898 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2899  * arr into sip->storep+lba and return true. If comparison fails then
2900  * return false. */
2901 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2902                               const u8 *arr, bool compare_only)
2903 {
2904         bool res;
2905         u64 block, rest = 0;
2906         u32 store_blks = sdebug_store_sectors;
2907         u32 lb_size = sdebug_sector_size;
2908         u8 *fsp = sip->storep;
2909
2910         block = do_div(lba, store_blks);
2911         if (block + num > store_blks)
2912                 rest = block + num - store_blks;
2913
2914         res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2915         if (!res)
2916                 return res;
2917         if (rest)
2918                 res = memcmp(fsp, arr + ((num - rest) * lb_size),
2919                              rest * lb_size);
2920         if (!res)
2921                 return res;
2922         if (compare_only)
2923                 return true;
2924         arr += num * lb_size;
2925         memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2926         if (rest)
2927                 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2928         return res;
2929 }
2930
2931 static __be16 dif_compute_csum(const void *buf, int len)
2932 {
2933         __be16 csum;
2934
2935         if (sdebug_guard)
2936                 csum = (__force __be16)ip_compute_csum(buf, len);
2937         else
2938                 csum = cpu_to_be16(crc_t10dif(buf, len));
2939
2940         return csum;
2941 }
2942
2943 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2944                       sector_t sector, u32 ei_lba)
2945 {
2946         __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2947
2948         if (sdt->guard_tag != csum) {
2949                 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2950                         (unsigned long)sector,
2951                         be16_to_cpu(sdt->guard_tag),
2952                         be16_to_cpu(csum));
2953                 return 0x01;
2954         }
2955         if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2956             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2957                 pr_err("REF check failed on sector %lu\n",
2958                         (unsigned long)sector);
2959                 return 0x03;
2960         }
2961         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2962             be32_to_cpu(sdt->ref_tag) != ei_lba) {
2963                 pr_err("REF check failed on sector %lu\n",
2964                         (unsigned long)sector);
2965                 return 0x03;
2966         }
2967         return 0;
2968 }
2969
/*
 * Copy @sectors protection tuples between the command's protection
 * scatter-gather list and the store's dif_storep array, starting at
 * @sector. Direction is store->sgl when @read is true, sgl->store
 * otherwise. Like the data store, dif_storep wraps: a run that passes
 * the end of the array continues at its start (the "rest" portion).
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;	/* bytes that wrap past dif_store_end */

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped portion continues at the array start */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3015
/*
 * Verify the stored protection tuples against the stored data for
 * @sectors sectors starting at @start_sec, then copy the tuples to the
 * command's protection scatter-gather list. Tuples whose application
 * tag is 0xffff are skipped (escape value: not protected). Returns 0
 * on success or the non-zero dif_verify() failure code.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* 0xffff app tag means "do not check this sector" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
				 ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
3047
/*
 * Respond to READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Decodes lba/num from the CDB, applies optional protection-information
 * checks, optionally injects errors (short reads, medium errors,
 * recovered/transport/DIF/DIX errors) and copies data from the backing
 * store into the command's buffer. Returns 0, a check condition result,
 * or a DID_* host byte code.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;	/* expected initial LBA for Type 2 protection */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;

	/* Decode starting LBA and block count from the CDB variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* Validate the RDPROTECT field (cmd[1] bits 5-7) against DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;	/* inject a short read */
		}
	} else
		sqcp = NULL;

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Inject an unrecoverable medium error in the configured LBA range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Post-transfer injected error reporting, if requested */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
3180
/*
 * Hex/ASCII dump of @len bytes at @buf to the kernel log, 16 bytes per
 * row; printable characters are shown as " c ", others as hex. Used for
 * diagnostics when a protection-information check fails on a write.
 * The inner loop is now bounded by @len as well, so a length that is
 * not a multiple of 16 no longer reads past the end of @buf.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i + j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", c);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
3202
/*
 * Verify incoming write data against the protection tuples supplied in
 * the command's protection scatter-gather list, walking the data and
 * protection lists in lock step (pages of each may differ in size, so
 * offsets are tracked independently). On success the tuples are copied
 * into the store's dif_storep. Returns 0 on success or the non-zero
 * dif_verify() failure code (0x01 guard, 0x03 ref tag).
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset within current protection page */
	int dpage_offset;	/* offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			/* data list exhausted before protection list */
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all sectors verified: persist the protection tuples */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3274
3275 static unsigned long lba_to_map_index(sector_t lba)
3276 {
3277         if (sdebug_unmap_alignment)
3278                 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3279         sector_div(lba, sdebug_unmap_granularity);
3280         return lba;
3281 }
3282
3283 static sector_t map_index_to_lba(unsigned long index)
3284 {
3285         sector_t lba = index * sdebug_unmap_granularity;
3286
3287         if (sdebug_unmap_alignment)
3288                 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3289         return lba;
3290 }
3291
3292 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3293                               unsigned int *num)
3294 {
3295         sector_t end;
3296         unsigned int mapped;
3297         unsigned long index;
3298         unsigned long next;
3299
3300         index = lba_to_map_index(lba);
3301         mapped = test_bit(index, sip->map_storep);
3302
3303         if (mapped)
3304                 next = find_next_zero_bit(sip->map_storep, map_size, index);
3305         else
3306                 next = find_next_bit(sip->map_storep, map_size, index);
3307
3308         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3309         *num = end - lba;
3310         return mapped;
3311 }
3312
3313 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3314                        unsigned int len)
3315 {
3316         sector_t end = lba + len;
3317
3318         while (lba < end) {
3319                 unsigned long index = lba_to_map_index(lba);
3320
3321                 if (index < map_size)
3322                         set_bit(index, sip->map_storep);
3323
3324                 lba = map_index_to_lba(index + 1);
3325         }
3326 }
3327
/*
 * Mark blocks in [lba, lba + len) as unmapped in the provisioning
 * bitmap. Only granularity-aligned blocks that are fully covered by
 * the range are deprovisioned; partial blocks at either end are left
 * mapped. Depending on sdebug_lbprz the backing data is overwritten
 * with zeroes (LBPRZ=1) or 0xff (LBPRZ=2), and any protection tuples
 * for the range are invalidated.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only deprovision aligned, fully-covered blocks */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				/* 0xff app tag == "unchecked" escape value */
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3356
3357 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3358 {
3359         bool check_prot;
3360         u32 num;
3361         u32 ei_lba;
3362         int ret;
3363         u64 lba;
3364         struct sdeb_store_info *sip = devip2sip(devip);
3365         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3366         u8 *cmd = scp->cmnd;
3367
3368         switch (cmd[0]) {
3369         case WRITE_16:
3370                 ei_lba = 0;
3371                 lba = get_unaligned_be64(cmd + 2);
3372                 num = get_unaligned_be32(cmd + 10);
3373                 check_prot = true;
3374                 break;
3375         case WRITE_10:
3376                 ei_lba = 0;
3377                 lba = get_unaligned_be32(cmd + 2);
3378                 num = get_unaligned_be16(cmd + 7);
3379                 check_prot = true;
3380                 break;
3381         case WRITE_6:
3382                 ei_lba = 0;
3383                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3384                       (u32)(cmd[1] & 0x1f) << 16;
3385                 num = (0 == cmd[4]) ? 256 : cmd[4];
3386                 check_prot = true;
3387                 break;
3388         case WRITE_12:
3389                 ei_lba = 0;
3390                 lba = get_unaligned_be32(cmd + 2);
3391                 num = get_unaligned_be32(cmd + 6);
3392                 check_prot = true;
3393                 break;
3394         case 0x53:      /* XDWRITEREAD(10) */
3395                 ei_lba = 0;
3396                 lba = get_unaligned_be32(cmd + 2);
3397                 num = get_unaligned_be16(cmd + 7);
3398                 check_prot = false;
3399                 break;
3400         default:        /* assume WRITE(32) */
3401                 lba = get_unaligned_be64(cmd + 12);
3402                 ei_lba = get_unaligned_be32(cmd + 20);
3403                 num = get_unaligned_be32(cmd + 28);
3404                 check_prot = false;
3405                 break;
3406         }
3407         if (unlikely(have_dif_prot && check_prot)) {
3408                 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3409                     (cmd[1] & 0xe0)) {
3410                         mk_sense_invalid_opcode(scp);
3411                         return check_condition_result;
3412                 }
3413                 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3414                      sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3415                     (cmd[1] & 0xe0) == 0)
3416                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3417                                     "to DIF device\n");
3418         }
3419
3420         write_lock(macc_lckp);
3421         ret = check_device_access_params(scp, lba, num, true);
3422         if (ret) {
3423                 write_unlock(macc_lckp);
3424                 return ret;
3425         }
3426
3427         /* DIX + T10 DIF */
3428         if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3429                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3430
3431                 if (prot_ret) {
3432                         write_unlock(macc_lckp);
3433                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3434                         return illegal_condition_result;
3435                 }
3436         }
3437
3438         ret = do_device_access(sip, scp, 0, lba, num, true);
3439         if (unlikely(scsi_debug_lbp()))
3440                 map_region(sip, lba, num);
3441         /* If ZBC zone then bump its write pointer */
3442         if (sdebug_dev_is_zoned(devip))
3443                 zbc_inc_wp(devip, lba, num);
3444         write_unlock(macc_lckp);
3445         if (unlikely(-1 == ret))
3446                 return DID_ERROR << 16;
3447         else if (unlikely(sdebug_verbose &&
3448                           (ret < (num * sdebug_sector_size))))
3449                 sdev_printk(KERN_INFO, scp->device,
3450                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3451                             my_name, num * sdebug_sector_size, ret);
3452
3453         if (unlikely(sdebug_any_injecting_opt)) {
3454                 struct sdebug_queued_cmd *sqcp =
3455                                 (struct sdebug_queued_cmd *)scp->host_scribble;
3456
3457                 if (sqcp) {
3458                         if (sqcp->inj_recovered) {
3459                                 mk_sense_buffer(scp, RECOVERED_ERROR,
3460                                                 THRESHOLD_EXCEEDED, 0);
3461                                 return check_condition_result;
3462                         } else if (sqcp->inj_dif) {
3463                                 /* Logical block guard check failed */
3464                                 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3465                                 return illegal_condition_result;
3466                         } else if (sqcp->inj_dix) {
3467                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3468                                 return illegal_condition_result;
3469                         }
3470                 }
3471         }
3472         return 0;
3473 }
3474
3475 /*
3476  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3477  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3478  */
3479 static int resp_write_scat(struct scsi_cmnd *scp,
3480                            struct sdebug_dev_info *devip)
3481 {
3482         u8 *cmd = scp->cmnd;
3483         u8 *lrdp = NULL;
3484         u8 *up;
3485         struct sdeb_store_info *sip = devip2sip(devip);
3486         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3487         u8 wrprotect;
3488         u16 lbdof, num_lrd, k;
3489         u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3490         u32 lb_size = sdebug_sector_size;
3491         u32 ei_lba;
3492         u64 lba;
3493         int ret, res;
3494         bool is_16;
3495         static const u32 lrd_size = 32; /* + parameter list header size */
3496
3497         if (cmd[0] == VARIABLE_LENGTH_CMD) {
3498                 is_16 = false;
3499                 wrprotect = (cmd[10] >> 5) & 0x7;
3500                 lbdof = get_unaligned_be16(cmd + 12);
3501                 num_lrd = get_unaligned_be16(cmd + 16);
3502                 bt_len = get_unaligned_be32(cmd + 28);
3503         } else {        /* that leaves WRITE SCATTERED(16) */
3504                 is_16 = true;
3505                 wrprotect = (cmd[2] >> 5) & 0x7;
3506                 lbdof = get_unaligned_be16(cmd + 4);
3507                 num_lrd = get_unaligned_be16(cmd + 8);
3508                 bt_len = get_unaligned_be32(cmd + 10);
3509                 if (unlikely(have_dif_prot)) {
3510                         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3511                             wrprotect) {
3512                                 mk_sense_invalid_opcode(scp);
3513                                 return illegal_condition_result;
3514                         }
3515                         if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3516                              sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3517                              wrprotect == 0)
3518                                 sdev_printk(KERN_ERR, scp->device,
3519                                             "Unprotected WR to DIF device\n");
3520                 }
3521         }
3522         if ((num_lrd == 0) || (bt_len == 0))
3523                 return 0;       /* T10 says these do-nothings are not errors */
3524         if (lbdof == 0) {
3525                 if (sdebug_verbose)
3526                         sdev_printk(KERN_INFO, scp->device,
3527                                 "%s: %s: LB Data Offset field bad\n",
3528                                 my_name, __func__);
3529                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3530                 return illegal_condition_result;
3531         }
3532         lbdof_blen = lbdof * lb_size;
3533         if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3534                 if (sdebug_verbose)
3535                         sdev_printk(KERN_INFO, scp->device,
3536                                 "%s: %s: LBA range descriptors don't fit\n",
3537                                 my_name, __func__);
3538                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3539                 return illegal_condition_result;
3540         }
3541         lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3542         if (lrdp == NULL)
3543                 return SCSI_MLQUEUE_HOST_BUSY;
3544         if (sdebug_verbose)
3545                 sdev_printk(KERN_INFO, scp->device,
3546                         "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3547                         my_name, __func__, lbdof_blen);
3548         res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3549         if (res == -1) {
3550                 ret = DID_ERROR << 16;
3551                 goto err_out;
3552         }
3553
3554         write_lock(macc_lckp);
3555         sg_off = lbdof_blen;
3556         /* Spec says Buffer xfer Length field in number of LBs in dout */
3557         cum_lb = 0;
3558         for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3559                 lba = get_unaligned_be64(up + 0);
3560                 num = get_unaligned_be32(up + 8);
3561                 if (sdebug_verbose)
3562                         sdev_printk(KERN_INFO, scp->device,
3563                                 "%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3564                                 my_name, __func__, k, lba, num, sg_off);
3565                 if (num == 0)
3566                         continue;
3567                 ret = check_device_access_params(scp, lba, num, true);
3568                 if (ret)
3569                         goto err_out_unlock;
3570                 num_by = num * lb_size;
3571                 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3572
3573                 if ((cum_lb + num) > bt_len) {
3574                         if (sdebug_verbose)
3575                                 sdev_printk(KERN_INFO, scp->device,
3576                                     "%s: %s: sum of blocks > data provided\n",
3577                                     my_name, __func__);
3578                         mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3579                                         0);
3580                         ret = illegal_condition_result;
3581                         goto err_out_unlock;
3582                 }
3583
3584                 /* DIX + T10 DIF */
3585                 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3586                         int prot_ret = prot_verify_write(scp, lba, num,
3587                                                          ei_lba);
3588
3589                         if (prot_ret) {
3590                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3591                                                 prot_ret);
3592                                 ret = illegal_condition_result;
3593                                 goto err_out_unlock;
3594                         }
3595                 }
3596
3597                 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3598                 /* If ZBC zone then bump its write pointer */
3599                 if (sdebug_dev_is_zoned(devip))
3600                         zbc_inc_wp(devip, lba, num);
3601                 if (unlikely(scsi_debug_lbp()))
3602                         map_region(sip, lba, num);
3603                 if (unlikely(-1 == ret)) {
3604                         ret = DID_ERROR << 16;
3605                         goto err_out_unlock;
3606                 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3607                         sdev_printk(KERN_INFO, scp->device,
3608                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3609                             my_name, num_by, ret);
3610
3611                 if (unlikely(sdebug_any_injecting_opt)) {
3612                         struct sdebug_queued_cmd *sqcp =
3613                                 (struct sdebug_queued_cmd *)scp->host_scribble;
3614
3615                         if (sqcp) {
3616                                 if (sqcp->inj_recovered) {
3617                                         mk_sense_buffer(scp, RECOVERED_ERROR,
3618                                                         THRESHOLD_EXCEEDED, 0);
3619                                         ret = illegal_condition_result;
3620                                         goto err_out_unlock;
3621                                 } else if (sqcp->inj_dif) {
3622                                         /* Logical block guard check failed */
3623                                         mk_sense_buffer(scp, ABORTED_COMMAND,
3624                                                         0x10, 1);
3625                                         ret = illegal_condition_result;
3626                                         goto err_out_unlock;
3627                                 } else if (sqcp->inj_dix) {
3628                                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
3629                                                         0x10, 1);
3630                                         ret = illegal_condition_result;
3631                                         goto err_out_unlock;
3632                                 }
3633                         }
3634                 }
3635                 sg_off += num_by;
3636                 cum_lb += num;
3637         }
3638         ret = 0;
3639 err_out_unlock:
3640         write_unlock(macc_lckp);
3641 err_out:
3642         kfree(lrdp);
3643         return ret;
3644 }
3645
3646 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3647                            u32 ei_lba, bool unmap, bool ndob)
3648 {
3649         struct scsi_device *sdp = scp->device;
3650         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3651         unsigned long long i;
3652         u64 block, lbaa;
3653         u32 lb_size = sdebug_sector_size;
3654         int ret;
3655         struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3656                                                 scp->device->hostdata);
3657         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3658         u8 *fs1p;
3659         u8 *fsp;
3660
3661         write_lock(macc_lckp);
3662
3663         ret = check_device_access_params(scp, lba, num, true);
3664         if (ret) {
3665                 write_unlock(macc_lckp);
3666                 return ret;
3667         }
3668
3669         if (unmap && scsi_debug_lbp()) {
3670                 unmap_region(sip, lba, num);
3671                 goto out;
3672         }
3673         lbaa = lba;
3674         block = do_div(lbaa, sdebug_store_sectors);
3675         /* if ndob then zero 1 logical block, else fetch 1 logical block */
3676         fsp = sip->storep;
3677         fs1p = fsp + (block * lb_size);
3678         if (ndob) {
3679                 memset(fs1p, 0, lb_size);
3680                 ret = 0;
3681         } else
3682                 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3683
3684         if (-1 == ret) {
3685                 write_unlock(&sip->macc_lck);
3686                 return DID_ERROR << 16;
3687         } else if (sdebug_verbose && !ndob && (ret < lb_size))
3688                 sdev_printk(KERN_INFO, scp->device,
3689                             "%s: %s: lb size=%u, IO sent=%d bytes\n",
3690                             my_name, "write same", lb_size, ret);
3691
3692         /* Copy first sector to remaining blocks */
3693         for (i = 1 ; i < num ; i++) {
3694                 lbaa = lba + i;
3695                 block = do_div(lbaa, sdebug_store_sectors);
3696                 memmove(fsp + (block * lb_size), fs1p, lb_size);
3697         }
3698         if (scsi_debug_lbp())
3699                 map_region(sip, lba, num);
3700         /* If ZBC zone then bump its write pointer */
3701         if (sdebug_dev_is_zoned(devip))
3702                 zbc_inc_wp(devip, lba, num);
3703 out:
3704         write_unlock(macc_lckp);
3705
3706         return 0;
3707 }
3708
3709 static int resp_write_same_10(struct scsi_cmnd *scp,
3710                               struct sdebug_dev_info *devip)
3711 {
3712         u8 *cmd = scp->cmnd;
3713         u32 lba;
3714         u16 num;
3715         u32 ei_lba = 0;
3716         bool unmap = false;
3717
3718         if (cmd[1] & 0x8) {
3719                 if (sdebug_lbpws10 == 0) {
3720                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3721                         return check_condition_result;
3722                 } else
3723                         unmap = true;
3724         }
3725         lba = get_unaligned_be32(cmd + 2);
3726         num = get_unaligned_be16(cmd + 7);
3727         if (num > sdebug_write_same_length) {
3728                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3729                 return check_condition_result;
3730         }
3731         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3732 }
3733
3734 static int resp_write_same_16(struct scsi_cmnd *scp,
3735                               struct sdebug_dev_info *devip)
3736 {
3737         u8 *cmd = scp->cmnd;
3738         u64 lba;
3739         u32 num;
3740         u32 ei_lba = 0;
3741         bool unmap = false;
3742         bool ndob = false;
3743
3744         if (cmd[1] & 0x8) {     /* UNMAP */
3745                 if (sdebug_lbpws == 0) {
3746                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3747                         return check_condition_result;
3748                 } else
3749                         unmap = true;
3750         }
3751         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3752                 ndob = true;
3753         lba = get_unaligned_be64(cmd + 2);
3754         num = get_unaligned_be32(cmd + 10);
3755         if (num > sdebug_write_same_length) {
3756                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3757                 return check_condition_result;
3758         }
3759         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3760 }
3761
3762 /* Note the mode field is in the same position as the (lower) service action
3763  * field. For the Report supported operation codes command, SPC-4 suggests
3764  * each mode of this command should be reported separately; for future. */
3765 static int resp_write_buffer(struct scsi_cmnd *scp,
3766                              struct sdebug_dev_info *devip)
3767 {
3768         u8 *cmd = scp->cmnd;
3769         struct scsi_device *sdp = scp->device;
3770         struct sdebug_dev_info *dp;
3771         u8 mode;
3772
3773         mode = cmd[1] & 0x1f;
3774         switch (mode) {
3775         case 0x4:       /* download microcode (MC) and activate (ACT) */
3776                 /* set UAs on this device only */
3777                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3778                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3779                 break;
3780         case 0x5:       /* download MC, save and ACT */
3781                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3782                 break;
3783         case 0x6:       /* download MC with offsets and ACT */
3784                 /* set UAs on most devices (LUs) in this target */
3785                 list_for_each_entry(dp,
3786                                     &devip->sdbg_host->dev_info_list,
3787                                     dev_list)
3788                         if (dp->target == sdp->id) {
3789                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3790                                 if (devip != dp)
3791                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3792                                                 dp->uas_bm);
3793                         }
3794                 break;
3795         case 0x7:       /* download MC with offsets, save, and ACT */
3796                 /* set UA on all devices (LUs) in this target */
3797                 list_for_each_entry(dp,
3798                                     &devip->sdbg_host->dev_info_list,
3799                                     dev_list)
3800                         if (dp->target == sdp->id)
3801                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3802                                         dp->uas_bm);
3803                 break;
3804         default:
3805                 /* do nothing for this command for other mode values */
3806                 break;
3807         }
3808         return 0;
3809 }
3810
/*
 * COMPARE AND WRITE (0x89). Fetches 2 * num logical blocks from the
 * data-out buffer into a temporary array and hands the pair to
 * comp_write_worker() under the write lock; presumably the first num
 * blocks are the compare data and the second num the write data (see
 * comp_write_worker). A miscompare yields MISCOMPARE sense.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fall back to the global fake-rw lock when there is no store */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* WRPROTECT (cmd[1] bits 7:5) must be zero for type 2 protection */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* data-out carries both the verify blocks and the write blocks */
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);	/* mark range provisioned */
cleanup:
	write_unlock(macc_lckp);
	kfree(arr);
	return retval;
}
3872
/* UNMAP block descriptor: one (LBA, number of blocks) range, big-endian
 * on the wire; decoded in resp_unmap() with get_unaligned_be*(). */
struct unmap_block_desc {
	__be64	lba;		/* starting logical block address */
	__be32	blocks;		/* number of logical blocks to unmap */
	__be32	__reserved;
};
3878
3879 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3880 {
3881         unsigned char *buf;
3882         struct unmap_block_desc *desc;
3883         struct sdeb_store_info *sip = devip2sip(devip);
3884         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3885         unsigned int i, payload_len, descriptors;
3886         int ret;
3887
3888         if (!scsi_debug_lbp())
3889                 return 0;       /* fib and say its done */
3890         payload_len = get_unaligned_be16(scp->cmnd + 7);
3891         BUG_ON(scsi_bufflen(scp) != payload_len);
3892
3893         descriptors = (payload_len - 8) / 16;
3894         if (descriptors > sdebug_unmap_max_desc) {
3895                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3896                 return check_condition_result;
3897         }
3898
3899         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3900         if (!buf) {
3901                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3902                                 INSUFF_RES_ASCQ);
3903                 return check_condition_result;
3904         }
3905
3906         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3907
3908         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3909         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3910
3911         desc = (void *)&buf[8];
3912
3913         write_lock(macc_lckp);
3914
3915         for (i = 0 ; i < descriptors ; i++) {
3916                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3917                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3918
3919                 ret = check_device_access_params(scp, lba, num, true);
3920                 if (ret)
3921                         goto out;
3922
3923                 unmap_region(sip, lba, num);
3924         }
3925
3926         ret = 0;
3927
3928 out:
3929         write_unlock(macc_lckp);
3930         kfree(buf);
3931
3932         return ret;
3933 }
3934
3935 #define SDEBUG_GET_LBA_STATUS_LEN 32
3936
3937 static int resp_get_lba_status(struct scsi_cmnd *scp,
3938                                struct sdebug_dev_info *devip)
3939 {
3940         u8 *cmd = scp->cmnd;
3941         struct sdeb_store_info *sip = devip2sip(devip);
3942         u64 lba;
3943         u32 alloc_len, mapped, num;
3944         int ret;
3945         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3946
3947         lba = get_unaligned_be64(cmd + 2);
3948         alloc_len = get_unaligned_be32(cmd + 10);
3949
3950         if (alloc_len < 24)
3951                 return 0;
3952
3953         ret = check_device_access_params(scp, lba, 1, false);
3954         if (ret)
3955                 return ret;
3956
3957         if (scsi_debug_lbp())
3958                 mapped = map_state(sip, lba, &num);
3959         else {
3960                 mapped = 1;
3961                 /* following just in case virtual_gb changed */
3962                 sdebug_capacity = get_sdebug_capacity();
3963                 if (sdebug_capacity - lba <= 0xffffffff)
3964                         num = sdebug_capacity - lba;
3965                 else
3966                         num = 0xffffffff;
3967         }
3968
3969         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3970         put_unaligned_be32(20, arr);            /* Parameter Data Length */
3971         put_unaligned_be64(lba, arr + 8);       /* LBA */
3972         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3973         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3974
3975         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3976 }
3977
3978 static int resp_sync_cache(struct scsi_cmnd *scp,
3979                            struct sdebug_dev_info *devip)
3980 {
3981         int res = 0;
3982         u64 lba;
3983         u32 num_blocks;
3984         u8 *cmd = scp->cmnd;
3985
3986         if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
3987                 lba = get_unaligned_be32(cmd + 2);
3988                 num_blocks = get_unaligned_be16(cmd + 7);
3989         } else {                                /* SYNCHRONIZE_CACHE(16) */
3990                 lba = get_unaligned_be64(cmd + 2);
3991                 num_blocks = get_unaligned_be32(cmd + 10);
3992         }
3993         if (lba + num_blocks > sdebug_capacity) {
3994                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3995                 return check_condition_result;
3996         }
3997         if (!write_since_sync || cmd[1] & 0x2)
3998                 res = SDEG_RES_IMMED_MASK;
3999         else            /* delay if write_since_sync and IMMED clear */
4000                 write_since_sync = false;
4001         return res;
4002 }
4003
4004 /*
4005  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4006  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4007  * a GOOD status otherwise. Model a disk with a big cache and yield
4008  * CONDITION MET. Actually tries to bring range in main memory into the
4009  * cache associated with the CPU(s).
4010  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	/* fsp is NULL when there is no backing store (fake read/write) */
	u8 *fsp = sip ? sip->storep : NULL;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;	/* nothing to prefetch; still CONDITION MET */
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div() divides lba in place; block gets lba % store sectors */
	block = do_div(lba, sdebug_store_sectors);
	/* store may be smaller than capacity, so the range can wrap */
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	read_lock(macc_lckp);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)	/* wrapped portion starts back at the store base */
		prefetch_range(fsp, rest * sdebug_sector_size);
	read_unlock(macc_lckp);
fini:
	if (cmd[1] & 0x2)	/* IMMED bit set */
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
4053
4054 #define RL_BUCKET_ELEMS 8
4055
4056 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4057  * (W-LUN), the normal Linux scanning logic does not associate it with a
4058  * device (e.g. /dev/sg7). The following magic will make that association:
4059  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4060  * where <n> is a host number. If there are multiple targets in a host then
4061  * the above will associate a W-LUN to each target. To only get a W-LUN
4062  * for target 2, then use "echo '- 2 49409' > scan" .
4063  */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	/* response is built RL_BUCKET_ELEMS luns at a time in this bucket */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* SELECT REPORT field decides which classes of LUNs to report */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	/* outer loop emits one bucket per iteration; k=0 bucket also
	 * carries the 8 byte response header in its first slot */
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;	/* header occupies first slot */
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* partial bucket: flushed after the loop */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the W-LUN (if requested) into the last, partial bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4154
4155 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4156 {
4157         bool is_bytchk3 = false;
4158         u8 bytchk;
4159         int ret, j;
4160         u32 vnum, a_num, off;
4161         const u32 lb_size = sdebug_sector_size;
4162         u64 lba;
4163         u8 *arr;
4164         u8 *cmd = scp->cmnd;
4165         struct sdeb_store_info *sip = devip2sip(devip);
4166         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4167
4168         bytchk = (cmd[1] >> 1) & 0x3;
4169         if (bytchk == 0) {
4170                 return 0;       /* always claim internal verify okay */
4171         } else if (bytchk == 2) {
4172                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4173                 return check_condition_result;
4174         } else if (bytchk == 3) {
4175                 is_bytchk3 = true;      /* 1 block sent, compared repeatedly */
4176         }
4177         switch (cmd[0]) {
4178         case VERIFY_16:
4179                 lba = get_unaligned_be64(cmd + 2);
4180                 vnum = get_unaligned_be32(cmd + 10);
4181                 break;
4182         case VERIFY:            /* is VERIFY(10) */
4183                 lba = get_unaligned_be32(cmd + 2);
4184                 vnum = get_unaligned_be16(cmd + 7);
4185                 break;
4186         default:
4187                 mk_sense_invalid_opcode(scp);
4188                 return check_condition_result;
4189         }
4190         a_num = is_bytchk3 ? 1 : vnum;
4191         /* Treat following check like one for read (i.e. no write) access */
4192         ret = check_device_access_params(scp, lba, a_num, false);
4193         if (ret)
4194                 return ret;
4195
4196         arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4197         if (!arr) {
4198                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4199                                 INSUFF_RES_ASCQ);
4200                 return check_condition_result;
4201         }
4202         /* Not changing store, so only need read access */
4203         read_lock(macc_lckp);
4204
4205         ret = do_dout_fetch(scp, a_num, arr);
4206         if (ret == -1) {
4207                 ret = DID_ERROR << 16;
4208                 goto cleanup;
4209         } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4210                 sdev_printk(KERN_INFO, scp->device,
4211                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4212                             my_name, __func__, a_num * lb_size, ret);
4213         }
4214         if (is_bytchk3) {
4215                 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4216                         memcpy(arr + off, arr, lb_size);
4217         }
4218         ret = 0;
4219         if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4220                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4221                 ret = check_condition_result;
4222                 goto cleanup;
4223         }
4224 cleanup:
4225         read_unlock(macc_lckp);
4226         kfree(arr);
4227         return ret;
4228 }
4229
4230 #define RZONES_DESC_HD 64
4231
4232 /* Report zones depending on start LBA nad reporting options */
4233 static int resp_report_zones(struct scsi_cmnd *scp,
4234                              struct sdebug_dev_info *devip)
4235 {
4236         unsigned int i, max_zones, rep_max_zones, nrz = 0;
4237         int ret = 0;
4238         u32 alloc_len, rep_opts, rep_len;
4239         bool partial;
4240         u64 lba, zs_lba;
4241         u8 *arr = NULL, *desc;
4242         u8 *cmd = scp->cmnd;
4243         struct sdeb_zone_state *zsp;
4244         struct sdeb_store_info *sip = devip2sip(devip);
4245         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4246
4247         if (!sdebug_dev_is_zoned(devip)) {
4248                 mk_sense_invalid_opcode(scp);
4249                 return check_condition_result;
4250         }
4251         zs_lba = get_unaligned_be64(cmd + 2);
4252         alloc_len = get_unaligned_be32(cmd + 10);
4253         rep_opts = cmd[14] & 0x3f;
4254         partial = cmd[14] & 0x80;
4255
4256         if (zs_lba >= sdebug_capacity) {
4257                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4258                 return check_condition_result;
4259         }
4260
4261         max_zones = devip->nr_zones - zs_lba / devip->zsize;
4262         rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4263                             max_zones);
4264
4265         arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4266         if (!arr) {
4267                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4268                                 INSUFF_RES_ASCQ);
4269                 return check_condition_result;
4270         }
4271
4272         read_lock(macc_lckp);
4273
4274         desc = arr + 64;
4275         for (i = 0; i < max_zones; i++) {
4276                 lba = zs_lba + devip->zsize * i;
4277                 if (lba > sdebug_capacity)
4278                         break;
4279                 zsp = zbc_zone(devip, lba);
4280                 switch (rep_opts) {
4281                 case 0x00:
4282                         /* All zones */
4283                         break;
4284                 case 0x01:
4285                         /* Empty zones */
4286                         if (zsp->z_cond != ZC1_EMPTY)
4287                                 continue;
4288                         break;
4289                 case 0x02:
4290                         /* Implicit open zones */
4291                         if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4292                                 continue;
4293                         break;
4294                 case 0x03:
4295                         /* Explicit open zones */
4296                         if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4297                                 continue;
4298                         break;
4299                 case 0x04:
4300                         /* Closed zones */
4301                         if (zsp->z_cond != ZC4_CLOSED)
4302                                 continue;
4303                         break;
4304                 case 0x05:
4305                         /* Full zones */
4306                         if (zsp->z_cond != ZC5_FULL)
4307                                 continue;
4308                         break;
4309                 case 0x06:
4310                 case 0x07:
4311                 case 0x10:
4312                 case 0x11:
4313                         /*
4314                          * Read-only, offline, reset WP recommended and
4315                          * non-seq-resource-used are not emulated: no zones
4316                          * to report;
4317                          */
4318                         continue;
4319                 case 0x3f:
4320                         /* Not write pointer (conventional) zones */
4321                         if (!zbc_zone_is_conv(zsp))
4322                                 continue;
4323                         break;
4324                 default:
4325                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
4326                                         INVALID_FIELD_IN_CDB, 0);
4327                         ret = check_condition_result;
4328                         goto fini;
4329                 }
4330
4331                 if (nrz < rep_max_zones) {
4332                         /* Fill zone descriptor */
4333                         if (zbc_zone_is_conv(zsp))
4334                                 desc[0] = 0x1;
4335                         else
4336                                 desc[0] = 0x2;
4337                         desc[1] = zsp->z_cond << 4;
4338                         put_unaligned_be64((u64)zsp->z_size, desc + 8);
4339                         put_unaligned_be64((u64)zsp->z_start, desc + 16);
4340                         put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4341                         desc += 64;
4342                 }
4343
4344                 if (partial && nrz >= rep_max_zones)
4345                         break;
4346
4347                 nrz++;
4348         }
4349
4350         /* Report header */
4351         put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4352         put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4353
4354         rep_len = (unsigned long)desc - (unsigned long)arr;
4355         ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4356
4357 fini:
4358         read_unlock(macc_lckp);
4359         kfree(arr);
4360         return ret;
4361 }
4362
4363 /* Logic transplanted from tcmu-runner, file_zbc.c */
4364 static void zbc_open_all(struct sdebug_dev_info *devip)
4365 {
4366         struct sdeb_zone_state *zsp = &devip->zstate[0];
4367         unsigned int i;
4368
4369         for (i = 0; i < devip->nr_zones; i++, zsp++) {
4370                 if (zsp->z_cond == ZC4_CLOSED)
4371                         zbc_open_zone(devip, &devip->zstate[i], true);
4372         }
4373 }
4374
/*
 * Handler for the ZBC OPEN ZONE command. With the ALL bit (cmd byte 14,
 * bit 0) set, explicitly opens all closed zones; otherwise opens the
 * single zone whose start LBA is in cmd bytes 2-9. Returns 0 on success
 * or check_condition_result with sense data set on @scp.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fall back to the fake r/w lock when there is no backing store */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	/* zone commands are only valid for zoned pseudo devices */
	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones cannot be opened */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	/* opening an already explicitly-open or full zone is a no-op */
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	/* enforce the explicit-open limit (max_open == 0 means no limit) */
	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	/* an implicitly open zone is closed first, then re-opened explicitly */
	if (zc == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	zbc_open_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
4445
4446 static void zbc_close_all(struct sdebug_dev_info *devip)
4447 {
4448         unsigned int i;
4449
4450         for (i = 0; i < devip->nr_zones; i++)
4451                 zbc_close_zone(devip, &devip->zstate[i]);
4452 }
4453
/*
 * Handler for the ZBC CLOSE ZONE command. With the ALL bit (cmd byte 14,
 * bit 0) set, closes all zones; otherwise closes the single zone whose
 * start LBA is in cmd bytes 2-9. Returns 0 on success or
 * check_condition_result with sense data set on @scp.
 */
static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fall back to the fake r/w lock when there is no backing store */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	/* zone commands are only valid for zoned pseudo devices */
	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones cannot be closed */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}
4502
4503 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4504                             struct sdeb_zone_state *zsp, bool empty)
4505 {
4506         enum sdebug_z_cond zc = zsp->z_cond;
4507
4508         if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4509             zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4510                 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4511                         zbc_close_zone(devip, zsp);
4512                 if (zsp->z_cond == ZC4_CLOSED)
4513                         devip->nr_closed--;
4514                 zsp->z_wp = zsp->z_start + zsp->z_size;
4515                 zsp->z_cond = ZC5_FULL;
4516         }
4517 }
4518
4519 static void zbc_finish_all(struct sdebug_dev_info *devip)
4520 {
4521         unsigned int i;
4522
4523         for (i = 0; i < devip->nr_zones; i++)
4524                 zbc_finish_zone(devip, &devip->zstate[i], false);
4525 }
4526
/*
 * Handler for the ZBC FINISH ZONE command. With the ALL bit (cmd byte
 * 14, bit 0) set, finishes all eligible zones; otherwise finishes the
 * single zone whose start LBA is in cmd bytes 2-9 (empty zones allowed).
 * Returns 0 on success or check_condition_result with sense data set.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fall back to the fake r/w lock when there is no backing store */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	/* zone commands are only valid for zoned pseudo devices */
	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones cannot be finished */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* empty == true: an empty zone may be finished directly */
	zbc_finish_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
4575
/*
 * Reset a zone's write pointer: close the zone if it is open, then mark
 * it EMPTY with the write pointer back at the zone start. Conventional
 * zones have no write pointer and are left untouched.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	/* an open zone must be closed first to keep the counters right */
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	/* zbc_close_zone() above may have just moved the zone to CLOSED */
	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
4594
4595 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4596 {
4597         unsigned int i;
4598
4599         for (i = 0; i < devip->nr_zones; i++)
4600                 zbc_rwp_zone(devip, &devip->zstate[i]);
4601 }
4602
/*
 * Handler for the ZBC RESET WRITE POINTER command. With the ALL bit
 * (cmd byte 14, bit 0) set, resets all zones; otherwise resets the
 * single zone whose start LBA is in cmd bytes 2-9. Returns 0 on success
 * or check_condition_result with sense data set on @scp.
 */
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fall back to the fake r/w lock when there is no backing store */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	/* zone commands are only valid for zoned pseudo devices */
	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones have no write pointer to reset */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}
4649
/*
 * Map a command to its submission queue: the hardware queue number
 * embedded in the command's blk-mq unique tag indexes sdebug_q_arr.
 */
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
	u32 tag = blk_mq_unique_tag(cmnd->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
	/* defensively fall back to queue 0 on an out-of-range hwq */
	if (WARN_ON_ONCE(hwq >= submit_queues))
		hwq = 0;
	return sdebug_q_arr + hwq;
}
4660
/*
 * Queued (deferred) command completions converge here, from both the
 * hrtimer and the work-queue deferral paths. Under the queue spinlock
 * the slot is validated, detached (a_cmnd cleared, in_use bit cleared)
 * and per-device accounting updated; scsi_done() is then invoked
 * outside the lock, unless the command was marked aborted.
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions landing on a different CPU than the issuer */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* sanity check the slot index before touching qc_arr */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* retired_max_queue > 0 means a max_queue shrink is in progress */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* shrink the retirement watermark to the highest busy slot */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
4734
4735 /* When high resolution timer goes off this function is called. */
4736 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4737 {
4738         struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4739                                                   hrt);
4740         sdebug_q_cmd_complete(sd_dp);
4741         return HRTIMER_NORESTART;
4742 }
4743
4744 /* When work queue schedules work, it calls this function. */
4745 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4746 {
4747         struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4748                                                   ew.work);
4749         sdebug_q_cmd_complete(sd_dp);
4750 }
4751
/*
 * When sdebug_uuid_ctl == 2 all LUs share one UUID as their LU name;
 * shared_uuid is generated lazily and got_shared_uuid records that it
 * is valid (see sdebug_device_create()).
 */
static bool got_shared_uuid;
static uuid_t shared_uuid;
4754
4755 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4756 {
4757         struct sdeb_zone_state *zsp;
4758         sector_t capacity = get_sdebug_capacity();
4759         sector_t zstart = 0;
4760         unsigned int i;
4761
4762         /*
4763          * Set the zone size: if zbc_zone_size_mb is not set, figure out a
4764          * zone size allowing for at least 4 zones on the device. Otherwise,
4765          * use the specified zone size checking that at least 2 zones can be
4766          * created for the device.
4767          */
4768         if (!zbc_zone_size_mb) {
4769                 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4770                         >> ilog2(sdebug_sector_size);
4771                 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4772                         devip->zsize >>= 1;
4773                 if (devip->zsize < 2) {
4774                         pr_err("Device capacity too small\n");
4775                         return -EINVAL;
4776                 }
4777         } else {
4778                 devip->zsize = (zbc_zone_size_mb * SZ_1M)
4779                         >> ilog2(sdebug_sector_size);
4780                 if (devip->zsize >= capacity) {
4781                         pr_err("Zone size too large for device capacity\n");
4782                         return -EINVAL;
4783                 }
4784         }
4785
4786         if (is_power_of_2(devip->zsize))
4787                 devip->zsize_shift = ilog2(devip->zsize);
4788         devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4789
4790         /* zbc_max_open_zones can be 0, meaning "not reported" (no limit) */
4791         if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4792                 devip->max_open = (devip->nr_zones - 1) / 2;
4793         else
4794                 devip->max_open = sdeb_zbc_max_open;
4795
4796         devip->zstate = kcalloc(devip->nr_zones,
4797                                 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4798         if (!devip->zstate)
4799                 return -ENOMEM;
4800
4801         for (i = 0; i < devip->nr_zones; i++) {
4802                 zsp = &devip->zstate[i];
4803
4804                 zsp->z_start = zstart;
4805
4806                 if (i == 0) {
4807                         zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4808                         zsp->z_wp = (sector_t)-1;
4809                 } else {
4810                         zsp->z_cond = ZC1_EMPTY;
4811                         zsp->z_wp = zsp->z_start;
4812                 }
4813
4814                 if (zsp->z_start + devip->zsize < capacity)
4815                         zsp->z_size = devip->zsize;
4816                 else
4817                         zsp->z_size = capacity - zsp->z_start;
4818
4819                 zstart += zsp->z_size;
4820         }
4821
4822         return 0;
4823 }
4824
4825 static struct sdebug_dev_info *sdebug_device_create(
4826                         struct sdebug_host_info *sdbg_host, gfp_t flags)
4827 {
4828         struct sdebug_dev_info *devip;
4829
4830         devip = kzalloc(sizeof(*devip), flags);
4831         if (devip) {
4832                 if (sdebug_uuid_ctl == 1)
4833                         uuid_gen(&devip->lu_name);
4834                 else if (sdebug_uuid_ctl == 2) {
4835                         if (got_shared_uuid)
4836                                 devip->lu_name = shared_uuid;
4837                         else {
4838                                 uuid_gen(&shared_uuid);
4839                                 got_shared_uuid = true;
4840                                 devip->lu_name = shared_uuid;
4841                         }
4842                 }
4843                 devip->sdbg_host = sdbg_host;
4844                 if (sdeb_zbc_in_use) {
4845                         if (sdebug_device_create_zones(devip)) {
4846                                 kfree(devip);
4847                                 return NULL;
4848                         }
4849                 }
4850                 devip->sdbg_host = sdbg_host;
4851                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4852         }
4853         return devip;
4854 }
4855
4856 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4857 {
4858         struct sdebug_host_info *sdbg_host;
4859         struct sdebug_dev_info *open_devip = NULL;
4860         struct sdebug_dev_info *devip;
4861
4862         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4863         if (!sdbg_host) {
4864                 pr_err("Host info NULL\n");
4865                 return NULL;
4866         }
4867         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4868                 if ((devip->used) && (devip->channel == sdev->channel) &&
4869                     (devip->target == sdev->id) &&
4870                     (devip->lun == sdev->lun))
4871                         return devip;
4872                 else {
4873                         if ((!devip->used) && (!open_devip))
4874                                 open_devip = devip;
4875                 }
4876         }
4877         if (!open_devip) { /* try and make a new one */
4878                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4879                 if (!open_devip) {
4880                         pr_err("out of memory at line %d\n", __LINE__);
4881                         return NULL;
4882                 }
4883         }
4884
4885         open_devip->channel = sdev->channel;
4886         open_devip->target = sdev->id;
4887         open_devip->lun = sdev->lun;
4888         open_devip->sdbg_host = sdbg_host;
4889         atomic_set(&open_devip->num_in_q, 0);
4890         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4891         open_devip->used = true;
4892         return open_devip;
4893 }
4894
4895 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4896 {
4897         if (sdebug_verbose)
4898                 pr_info("slave_alloc <%u %u %u %llu>\n",
4899                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4900         return 0;
4901 }
4902
4903 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4904 {
4905         struct sdebug_dev_info *devip =
4906                         (struct sdebug_dev_info *)sdp->hostdata;
4907
4908         if (sdebug_verbose)
4909                 pr_info("slave_configure <%u %u %u %llu>\n",
4910                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4911         if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4912                 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4913         if (devip == NULL) {
4914                 devip = find_build_dev_info(sdp);
4915                 if (devip == NULL)
4916                         return 1;  /* no resources, will be marked offline */
4917         }
4918         sdp->hostdata = devip;
4919         if (sdebug_no_uld)
4920                 sdp->no_uld_attach = 1;
4921         config_cdb_len(sdp);
4922         return 0;
4923 }
4924
4925 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
4926 {
4927         struct sdebug_dev_info *devip =
4928                 (struct sdebug_dev_info *)sdp->hostdata;
4929
4930         if (sdebug_verbose)
4931                 pr_info("slave_destroy <%u %u %u %llu>\n",
4932                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4933         if (devip) {
4934                 /* make this slot available for re-use */
4935                 devip->used = false;
4936                 sdp->hostdata = NULL;
4937         }
4938 }
4939
4940 static void stop_qc_helper(struct sdebug_defer *sd_dp,
4941                            enum sdeb_defer_type defer_t)
4942 {
4943         if (!sd_dp)
4944                 return;
4945         if (defer_t == SDEB_DEFER_HRT)
4946                 hrtimer_cancel(&sd_dp->hrt);
4947         else if (defer_t == SDEB_DEFER_WQ)
4948                 cancel_work_sync(&sd_dp->ew.work);
4949 }
4950
/* If @cmnd found deletes its timer or work queue and returns true; else
 * returns false. Scans every submission queue's slot array under the
 * queue spinlock; the lock is dropped before stop_qc_helper() because
 * cancel_work_sync()/hrtimer_cancel() may sleep or wait for the handler.
 */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		/* a pending max_queue shrink may leave slots above qmax busy */
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot the deferral type under the lock */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
4996
/* Deletes (stops) timers or work queues of all queued commands. The
 * queue spinlock is dropped around stop_qc_helper() (its cancel calls
 * may sleep/wait) and reacquired before continuing the slot scan.
 */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot the deferral type under the lock */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
5035
5036 /* Free queued command memory on heap */
5037 static void free_all_queued(void)
5038 {
5039         int j, k;
5040         struct sdebug_queue *sqp;
5041         struct sdebug_queued_cmd *sqcp;
5042
5043         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5044                 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5045                         sqcp = &sqp->qc_arr[k];
5046                         kfree(sqcp->sd_dp);
5047                         sqcp->sd_dp = NULL;
5048                 }
5049         }
5050 }
5051
5052 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5053 {
5054         bool ok;
5055
5056         ++num_aborts;
5057         if (SCpnt) {
5058                 ok = stop_queued_cmnd(SCpnt);
5059                 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5060                         sdev_printk(KERN_INFO, SCpnt->device,
5061                                     "%s: command%s found\n", __func__,
5062                                     ok ? "" : " not");
5063         }
5064         return SUCCESS;
5065 }
5066
5067 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5068 {
5069         ++num_dev_resets;
5070         if (SCpnt && SCpnt->device) {
5071                 struct scsi_device *sdp = SCpnt->device;
5072                 struct sdebug_dev_info *devip =
5073                                 (struct sdebug_dev_info *)sdp->hostdata;
5074
5075                 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5076                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5077                 if (devip)
5078                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
5079         }
5080         return SUCCESS;
5081 }
5082
5083 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5084 {
5085         struct sdebug_host_info *sdbg_host;
5086         struct sdebug_dev_info *devip;
5087         struct scsi_device *sdp;
5088         struct Scsi_Host *hp;
5089         int k = 0;
5090
5091         ++num_target_resets;
5092         if (!SCpnt)
5093                 goto lie;
5094         sdp = SCpnt->device;
5095         if (!sdp)
5096                 goto lie;
5097         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5098                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5099         hp = sdp->host;
5100         if (!hp)
5101                 goto lie;
5102         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5103         if (sdbg_host) {
5104                 list_for_each_entry(devip,
5105                                     &sdbg_host->dev_info_list,
5106                                     dev_list)
5107                         if (devip->target == sdp->id) {
5108                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5109                                 ++k;
5110                         }
5111         }
5112         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5113                 sdev_printk(KERN_INFO, sdp,
5114                             "%s: %d device(s) found in target\n", __func__, k);
5115 lie:
5116         return SUCCESS;
5117 }
5118
5119 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5120 {
5121         struct sdebug_host_info *sdbg_host;
5122         struct sdebug_dev_info *devip;
5123         struct scsi_device *sdp;
5124         struct Scsi_Host *hp;
5125         int k = 0;
5126
5127         ++num_bus_resets;
5128         if (!(SCpnt && SCpnt->device))
5129                 goto lie;
5130         sdp = SCpnt->device;
5131         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5132                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5133         hp = sdp->host;
5134         if (hp) {
5135                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5136                 if (sdbg_host) {
5137                         list_for_each_entry(devip,
5138                                             &sdbg_host->dev_info_list,
5139                                             dev_list) {
5140                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5141                                 ++k;
5142                         }
5143                 }
5144         }
5145         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5146                 sdev_printk(KERN_INFO, sdp,
5147                             "%s: %d device(s) found in host\n", __func__, k);
5148 lie:
5149         return SUCCESS;
5150 }
5151
5152 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5153 {
5154         struct sdebug_host_info *sdbg_host;
5155         struct sdebug_dev_info *devip;
5156         int k = 0;
5157
5158         ++num_host_resets;
5159         if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5160                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5161         spin_lock(&sdebug_host_list_lock);
5162         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5163                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5164                                     dev_list) {
5165                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5166                         ++k;
5167                 }
5168         }
5169         spin_unlock(&sdebug_host_list_lock);
5170         stop_all_queued();
5171         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5172                 sdev_printk(KERN_INFO, SCpnt->device,
5173                             "%s: %d device(s) found\n", __func__, k);
5174         return SUCCESS;
5175 }
5176
5177 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5178 {
5179         struct msdos_partition *pp;
5180         int starts[SDEBUG_MAX_PARTS + 2];
5181         int sectors_per_part, num_sectors, k;
5182         int heads_by_sects, start_sec, end_sec;
5183
5184         /* assume partition table already zeroed */
5185         if ((sdebug_num_parts < 1) || (store_size < 1048576))
5186                 return;
5187         if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5188                 sdebug_num_parts = SDEBUG_MAX_PARTS;
5189                 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5190         }
5191         num_sectors = (int)sdebug_store_sectors;
5192         sectors_per_part = (num_sectors - sdebug_sectors_per)
5193                            / sdebug_num_parts;
5194         heads_by_sects = sdebug_heads * sdebug_sectors_per;
5195         starts[0] = sdebug_sectors_per;
5196         for (k = 1; k < sdebug_num_parts; ++k)
5197                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5198                             * heads_by_sects;
5199         starts[sdebug_num_parts] = num_sectors;
5200         starts[sdebug_num_parts + 1] = 0;
5201
5202         ramp[510] = 0x55;       /* magic partition markings */
5203         ramp[511] = 0xAA;
5204         pp = (struct msdos_partition *)(ramp + 0x1be);
5205         for (k = 0; starts[k + 1]; ++k, ++pp) {
5206                 start_sec = starts[k];
5207                 end_sec = starts[k + 1] - 1;
5208                 pp->boot_ind = 0;
5209
5210                 pp->cyl = start_sec / heads_by_sects;
5211                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5212                            / sdebug_sectors_per;
5213                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5214
5215                 pp->end_cyl = end_sec / heads_by_sects;
5216                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5217                                / sdebug_sectors_per;
5218                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5219
5220                 pp->start_sect = cpu_to_le32(start_sec);
5221                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5222                 pp->sys_ind = 0x83;     /* plain Linux partition */
5223         }
5224 }
5225
5226 static void block_unblock_all_queues(bool block)
5227 {
5228         int j;
5229         struct sdebug_queue *sqp;
5230
5231         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5232                 atomic_set(&sqp->blocked, (int)block);
5233 }
5234
5235 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5236  * commands will be processed normally before triggers occur.
5237  */
5238 static void tweak_cmnd_count(void)
5239 {
5240         int count, modulo;
5241
5242         modulo = abs(sdebug_every_nth);
5243         if (modulo < 2)
5244                 return;
5245         block_unblock_all_queues(true);
5246         count = atomic_read(&sdebug_cmnd_count);
5247         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5248         block_unblock_all_queues(false);
5249 }
5250
5251 static void clear_queue_stats(void)
5252 {
5253         atomic_set(&sdebug_cmnd_count, 0);
5254         atomic_set(&sdebug_completions, 0);
5255         atomic_set(&sdebug_miss_cpus, 0);
5256         atomic_set(&sdebug_a_tsf, 0);
5257 }
5258
5259 static void setup_inject(struct sdebug_queue *sqp,
5260                          struct sdebug_queued_cmd *sqcp)
5261 {
5262         if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
5263                 if (sdebug_every_nth > 0)
5264                         sqcp->inj_recovered = sqcp->inj_transport
5265                                 = sqcp->inj_dif
5266                                 = sqcp->inj_dix = sqcp->inj_short
5267                                 = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
5268                 return;
5269         }
5270         sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
5271         sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
5272         sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
5273         sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
5274         sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
5275         sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
5276         sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
5277 }
5278
#define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */

/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 *
 * @cmnd:       command being processed
 * @devip:      simulated device; NULL yields DID_NO_CONNECT
 * @scsi_result: non-zero forces this result (unless pfp sets one first)
 * @pfp:        resp_*() function producing the command's result, may be NULL
 * @delta_jiff: delay in jiffies; 0 means respond in the calling thread,
 *              negative selects the work-queue path
 * @ndelay:     delay in nanoseconds (used when delta_jiff <= 0)
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	bool new_sd_dp;
	int k, num_in_q, qdepth, inject;
	unsigned long iflags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		/* no device info: complete in this thread with an error */
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* delay of 0 means: do not defer, respond from this thread */
	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	inject = 0;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		/* device queue depth exceeded */
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* occasionally inject TASK SET FULL near the queue limit */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = 1;
			scsi_result = device_qfull_result;
		}
	}

	/* claim a free slot in this queue's command array */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	__set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
		setup_inject(sqp, sqcp);
	if (sd_dp == NULL) {
		/* first use of this slot: allocate its deferral record */
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (sd_dp == NULL)
			return SCSI_MLQUEUE_HOST_BUSY;
		new_sd_dp = true;
	} else {
		new_sd_dp = false;
	}

	/* timestamp now so short ndelays can account for elapsed time */
	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function requested immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			/* optionally randomize the delay, 0..ns */
			if (sdebug_random && ns < U32_MAX) {
				ns = prandom_u32_max((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = prandom_u32_max((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* delay already consumed: undo the
					 * queue bookkeeping and complete now
					 */
					sqcp->a_cmnd = NULL;
					atomic_dec(&devip->num_in_q);
					clear_bit(k, sqp->in_use_bm);
					if (new_sd_dp)
						kfree(sd_dp);
					/* call scsi_done() from this thread */
					cmnd->scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (!sd_dp->init_hrt) {
			/* lazily initialize the hrtimer on first use */
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		/* schedule the invocation of scsi_done() for a later time */
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (!sd_dp->init_wq) {
			/* lazily initialize the work item on first use */
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		if (unlikely(sqcp->inj_cmd_abort))
			sd_dp->aborted = true;
		schedule_work(&sd_dp->ew.work);
		if (unlikely(sqcp->inj_cmd_abort)) {
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
				    cmnd->request->tag);
			blk_abort_request(cmnd->request);
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
		     (scsi_result == device_qfull_result)))
		sdev_printk(KERN_INFO, sdp,
			    "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""),
			    "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
5475
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
/* Parameters are kept in alphabetical order. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5544
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One description per module parameter, matching the declarations above. */
MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5607
5608 #define SDEBUG_INFO_LEN 256
5609 static char sdebug_info[SDEBUG_INFO_LEN];
5610
5611 static const char *scsi_debug_info(struct Scsi_Host *shp)
5612 {
5613         int k;
5614
5615         k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5616                       my_name, SDEBUG_VERSION, sdebug_version_date);
5617         if (k >= (SDEBUG_INFO_LEN - 1))
5618                 return sdebug_info;
5619         scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5620                   "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5621                   sdebug_dev_size_mb, sdebug_opts, submit_queues,
5622                   "statistics", (int)sdebug_statistics);
5623         return sdebug_info;
5624 }
5625
5626 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5627 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5628                                  int length)
5629 {
5630         char arr[16];
5631         int opts;
5632         int minLen = length > 15 ? 15 : length;
5633
5634         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5635                 return -EACCES;
5636         memcpy(arr, buffer, minLen);
5637         arr[minLen] = '\0';
5638         if (1 != sscanf(arr, "%d", &opts))
5639                 return -EINVAL;
5640         sdebug_opts = opts;
5641         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5642         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5643         if (sdebug_every_nth != 0)
5644                 tweak_cmnd_count();
5645         return length;
5646 }
5647
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	/* driver-wide settings and counters */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per-submission-queue busy-slot summary */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	/* host list and per-store xarray (only when any store exists) */
	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
5721
/* Show the current per-command response delay in jiffies (sysfs 'delay'). */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			/* refuse the change while any queue slot is in use */
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				/* jiffies delay cancels any nanosecond delay */
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
5761
/* Show the current per-command response delay in nanoseconds (sysfs 'ndelay'). */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* ndelay must be non-negative and below one second */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			/* refuse the change while any queue slot is in use */
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				/* ndelay of 0 restores the default jiffies delay */
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
5802
/* Show the option-flags bitmask in hexadecimal (sysfs 'opts'). */
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}
5807
5808 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5809                           size_t count)
5810 {
5811         int opts;
5812         char work[20];
5813
5814         if (sscanf(buf, "%10s", work) == 1) {
5815                 if (strncasecmp(work, "0x", 2) == 0) {
5816                         if (kstrtoint(work + 2, 16, &opts) == 0)
5817                                 goto opts_done;
5818                 } else {
5819                         if (kstrtoint(work, 10, &opts) == 0)
5820                                 goto opts_done;
5821                 }
5822         }
5823         return -EINVAL;
5824 opts_done:
5825         sdebug_opts = opts;
5826         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5827         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5828         tweak_cmnd_count();
5829         return count;
5830 }
5831 static DRIVER_ATTR_RW(opts);
5832
/* Show the SCSI peripheral device type (sysfs 'ptype'). */
static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
5837 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5838                            size_t count)
5839 {
5840         int n;
5841
5842         /* Cannot change from or to TYPE_ZBC with sysfs */
5843         if (sdebug_ptype == TYPE_ZBC)
5844                 return -EINVAL;
5845
5846         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5847                 if (n == TYPE_ZBC)
5848                         return -EINVAL;
5849                 sdebug_ptype = n;
5850                 return count;
5851         }
5852         return -EINVAL;
5853 }
5854 static DRIVER_ATTR_RW(ptype);
5855
/* Show the dsense (descriptor-format sense data) setting. */
static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
5860 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5861                             size_t count)
5862 {
5863         int n;
5864
5865         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5866                 sdebug_dsense = n;
5867                 return count;
5868         }
5869         return -EINVAL;
5870 }
5871 static DRIVER_ATTR_RW(dsense);
5872
/* Show whether READs/WRITEs are faked (no backing store access). */
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/*
 * fake_rw transitions:
 *   1 --> 0 : backing needed again, so attach (or create) a store and make
 *             every host point at that same store;
 *   0 --> 1 : backing no longer needed, erase all stores apart from the
 *             first (kept so a later 1 --> 0 transition can re-use it).
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		n = (n > 0);	/* normalize both values to 0 or 1 */
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
5920
/* Show the no_lun_0 setting (non-zero hides LUN 0). */
static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
5925 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
5926                               size_t count)
5927 {
5928         int n;
5929
5930         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5931                 sdebug_no_lun_0 = n;
5932                 return count;
5933         }
5934         return -EINVAL;
5935 }
5936 static DRIVER_ATTR_RW(no_lun_0);
5937
/* Show the number of simulated targets per host. */
static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
5942 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
5943                               size_t count)
5944 {
5945         int n;
5946
5947         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5948                 sdebug_num_tgts = n;
5949                 sdebug_max_tgts_luns();
5950                 return count;
5951         }
5952         return -EINVAL;
5953 }
5954 static DRIVER_ATTR_RW(num_tgts);
5955
/* Device size (MiB) is read-only via sysfs; set at module load. */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
5961
/* Show whether each new host gets its own backing store. */
static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

/* Accepts any kstrtobool() syntax ("0"/"1", "y"/"n", "on"/"off", ...). */
static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);
5979
/* Number of partitions per device is read-only via sysfs. */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
5985
/* Show the every_nth error-injection interval. */
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
/* Accepts any integer (negative values are deliberately not rejected here).
 * Non-zero injection needs command statistics, so they are switched on. */
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;

	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
		sdebug_every_nth = nth;
		if (nth && !sdebug_statistics) {
			pr_info("every_nth needs statistics=1, set it\n");
			sdebug_statistics = true;
		}
		tweak_cmnd_count();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(every_nth);
6007
/* Show the maximum number of LUNs per target. */
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/* Set max_luns (capped at 256). On a change at scsi_level >= SPC-3, raise
 * a LUNS CHANGED unit attention on every simulated device. */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk every device on every host under the lock */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
6046
/* Show the per-queue command limit. */
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		k = 0;
		/* find the highest in-use slot index across all queues */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			/* find_last_bit() == size: no command in flight */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* commands in flight beyond the new limit */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
6082
/* no_uld and scsi_level are read-only via sysfs. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
6094
/* Show the virtual (wrap-around) capacity in GiB. */
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/* Change the virtual capacity. On a change, recompute capacity and raise a
 * CAPACITY CHANGED unit attention on every simulated device. */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk every device on every host under the lock */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
6133
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/* Add (delta > 0) or remove (delta < 0) that many simulated hosts. With
 * per_host_store active (and fake_rw off), a store marked not-in-use is
 * re-used for a new host instead of allocating a fresh one. */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* take the first store marked not-in-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6177
/* Show whether VPD pages incorporate the host number. */
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
6182 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6183                                     size_t count)
6184 {
6185         int n;
6186
6187         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6188                 sdebug_vpd_use_hostno = n;
6189                 return count;
6190         }
6191         return -EINVAL;
6192 }
6193 static DRIVER_ATTR_RW(vpd_use_hostno);
6194
/* Show whether command statistics gathering is enabled. */
static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
6199 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6200                                 size_t count)
6201 {
6202         int n;
6203
6204         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6205                 if (n > 0)
6206                         sdebug_statistics = true;
6207                 else {
6208                         clear_queue_stats();
6209                         sdebug_statistics = false;
6210                 }
6211                 return count;
6212         }
6213         return -EINVAL;
6214 }
6215 static DRIVER_ATTR_RW(statistics);
6216
/* The following attributes are read-only via sysfs. */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
6252
/* Show the provisioning map as a bitmap list ("%pbl" format), or the whole
 * LBA range when logical block provisioning is not enabled. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			/* PAGE_SIZE - 1 leaves room for the trailing '\n' */
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
6274
/* Show the 'random' flag setting. */
static ssize_t random_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
}

/* Set/clear the flag; accepts any kstrtobool() syntax ("0"/"1", "y"/"n",
 * "on"/"off", ...). */
static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_random = v;
	return count;
}
static DRIVER_ATTR_RW(random);
6292
/* Show whether devices report themselves as removable media. */
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
6297 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6298                                size_t count)
6299 {
6300         int n;
6301
6302         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6303                 sdebug_removable = (n > 0);
6304                 return count;
6305         }
6306         return -EINVAL;
6307 }
6308 static DRIVER_ATTR_RW(removable);
6309
/* Show the host_lock flag (see store: the flag has no effect). */
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);
6327
/* Show the strict (stricter command checking) flag. */
static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
6332 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6333                             size_t count)
6334 {
6335         int n;
6336
6337         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6338                 sdebug_strict = (n > 0);
6339                 return count;
6340         }
6341         return -EINVAL;
6342 }
6343 static DRIVER_ATTR_RW(strict);
6344
/* uuid_ctl is read-only via sysfs; set at module load. */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);

/* Show the current CDB length setting. */
static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
6355 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6356                              size_t count)
6357 {
6358         int ret, n;
6359
6360         ret = kstrtoint(buf, 0, &n);
6361         if (ret)
6362                 return ret;
6363         sdebug_cdb_len = n;
6364         all_config_cdb_len();
6365         return count;
6366 }
6367 static DRIVER_ATTR_RW(cdb_len);
6368
/* Accepted spellings for the zbc model parameter, indexed by BLK_ZONED_*.
 * The three synonym tables are tried in turn by sdeb_zbc_model_str(). */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6386
6387 static int sdeb_zbc_model_str(const char *cp)
6388 {
6389         int res = sysfs_match_string(zbc_model_strs_a, cp);
6390
6391         if (res < 0) {
6392                 res = sysfs_match_string(zbc_model_strs_b, cp);
6393                 if (res < 0) {
6394                         res = sysfs_match_string(zbc_model_strs_c, cp);
6395                         if (sdeb_zbc_model < 0)
6396                                 return -EINVAL;
6397                 }
6398         }
6399         return res;
6400 }
6401
/* Show the configured zoned-block model using its long spelling. */
static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);
6408
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_zbc.attr,
	NULL,	/* sysfs attribute lists are NULL-terminated */
};
/* Generates sdebug_drv_groups, referenced at driver registration. */
ATTRIBUTE_GROUPS(sdebug_drv);
6454
6455 static struct device *pseudo_primary;
6456
6457 static int __init scsi_debug_init(void)
6458 {
6459         bool want_store = (sdebug_fake_rw == 0);
6460         unsigned long sz;
6461         int k, ret, hosts_to_add;
6462         int idx = -1;
6463
6464         ramdisk_lck_a[0] = &atomic_rw;
6465         ramdisk_lck_a[1] = &atomic_rw2;
6466         atomic_set(&retired_max_queue, 0);
6467
6468         if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6469                 pr_warn("ndelay must be less than 1 second, ignored\n");
6470                 sdebug_ndelay = 0;
6471         } else if (sdebug_ndelay > 0)
6472                 sdebug_jdelay = JDELAY_OVERRIDDEN;
6473
6474         switch (sdebug_sector_size) {
6475         case  512:
6476         case 1024:
6477         case 2048:
6478         case 4096:
6479                 break;
6480         default:
6481                 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6482                 return -EINVAL;
6483         }
6484
6485         switch (sdebug_dif) {
6486         case T10_PI_TYPE0_PROTECTION:
6487                 break;
6488         case T10_PI_TYPE1_PROTECTION:
6489         case T10_PI_TYPE2_PROTECTION:
6490         case T10_PI_TYPE3_PROTECTION:
6491                 have_dif_prot = true;
6492                 break;
6493
6494         default:
6495                 pr_err("dif must be 0, 1, 2 or 3\n");
6496                 return -EINVAL;
6497         }
6498
6499         if (sdebug_num_tgts < 0) {
6500                 pr_err("num_tgts must be >= 0\n");
6501                 return -EINVAL;
6502         }
6503
6504         if (sdebug_guard > 1) {
6505                 pr_err("guard must be 0 or 1\n");
6506                 return -EINVAL;
6507         }
6508
6509         if (sdebug_ato > 1) {
6510                 pr_err("ato must be 0 or 1\n");
6511                 return -EINVAL;
6512         }
6513
6514         if (sdebug_physblk_exp > 15) {
6515                 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6516                 return -EINVAL;
6517         }
6518         if (sdebug_max_luns > 256) {
6519                 pr_warn("max_luns can be no more than 256, use default\n");
6520                 sdebug_max_luns = DEF_MAX_LUNS;
6521         }
6522
6523         if (sdebug_lowest_aligned > 0x3fff) {
6524                 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6525                 return -EINVAL;
6526         }
6527
6528         if (submit_queues < 1) {
6529                 pr_err("submit_queues must be 1 or more\n");
6530                 return -EINVAL;
6531         }
6532         sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6533                                GFP_KERNEL);
6534         if (sdebug_q_arr == NULL)
6535                 return -ENOMEM;
6536         for (k = 0; k < submit_queues; ++k)
6537                 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6538
6539         /*
6540          * check for host managed zoned block device specified with
6541          * ptype=0x14 or zbc=XXX.
6542          */
6543         if (sdebug_ptype == TYPE_ZBC) {
6544                 sdeb_zbc_model = BLK_ZONED_HM;
6545         } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6546                 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6547                 if (k < 0) {
6548                         ret = k;
6549                         goto free_vm;
6550                 }
6551                 sdeb_zbc_model = k;
6552                 switch (sdeb_zbc_model) {
6553                 case BLK_ZONED_NONE:
6554                         sdebug_ptype = TYPE_DISK;
6555                         break;
6556                 case BLK_ZONED_HM:
6557                         sdebug_ptype = TYPE_ZBC;
6558                         break;
6559                 case BLK_ZONED_HA:
6560                 default:
6561                         pr_err("Invalid ZBC model\n");
6562                         return -EINVAL;
6563                 }
6564         }
6565         if (sdeb_zbc_model != BLK_ZONED_NONE) {
6566                 sdeb_zbc_in_use = true;
6567                 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6568                         sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6569         }
6570
6571         if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6572                 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6573         if (sdebug_dev_size_mb < 1)
6574                 sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6575         sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6576         sdebug_store_sectors = sz / sdebug_sector_size;
6577         sdebug_capacity = get_sdebug_capacity();
6578
6579         /* play around with geometry, don't waste too much on track 0 */
6580         sdebug_heads = 8;
6581         sdebug_sectors_per = 32;
6582         if (sdebug_dev_size_mb >= 256)
6583                 sdebug_heads = 64;
6584         else if (sdebug_dev_size_mb >= 16)
6585                 sdebug_heads = 32;
6586         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6587                                (sdebug_sectors_per * sdebug_heads);
6588         if (sdebug_cylinders_per >= 1024) {
6589                 /* other LLDs do this; implies >= 1GB ram disk ... */
6590                 sdebug_heads = 255;
6591                 sdebug_sectors_per = 63;
6592                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6593                                (sdebug_sectors_per * sdebug_heads);
6594         }
6595         if (scsi_debug_lbp()) {
6596                 sdebug_unmap_max_blocks =
6597                         clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6598
6599                 sdebug_unmap_max_desc =
6600                         clamp(sdebug_unmap_max_desc, 0U, 256U);
6601
6602                 sdebug_unmap_granularity =
6603                         clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6604
6605                 if (sdebug_unmap_alignment &&
6606                     sdebug_unmap_granularity <=
6607                     sdebug_unmap_alignment) {
6608                         pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6609                         ret = -EINVAL;
6610                         goto free_q_arr;
6611                 }
6612         }
6613         xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6614         if (want_store) {
6615                 idx = sdebug_add_store();
6616                 if (idx < 0) {
6617                         ret = idx;
6618                         goto free_q_arr;
6619                 }
6620         }
6621
6622         pseudo_primary = root_device_register("pseudo_0");
6623         if (IS_ERR(pseudo_primary)) {
6624                 pr_warn("root_device_register() error\n");
6625                 ret = PTR_ERR(pseudo_primary);
6626                 goto free_vm;
6627         }
6628         ret = bus_register(&pseudo_lld_bus);
6629         if (ret < 0) {
6630                 pr_warn("bus_register error: %d\n", ret);
6631                 goto dev_unreg;
6632         }
6633         ret = driver_register(&sdebug_driverfs_driver);
6634         if (ret < 0) {
6635                 pr_warn("driver_register error: %d\n", ret);
6636                 goto bus_unreg;
6637         }
6638
6639         hosts_to_add = sdebug_add_host;
6640         sdebug_add_host = 0;
6641
6642         for (k = 0; k < hosts_to_add; k++) {
6643                 if (want_store && k == 0) {
6644                         ret = sdebug_add_host_helper(idx);
6645                         if (ret < 0) {
6646                                 pr_err("add_host_helper k=%d, error=%d\n",
6647                                        k, -ret);
6648                                 break;
6649                         }
6650                 } else {
6651                         ret = sdebug_do_add_host(want_store &&
6652                                                  sdebug_per_host_store);
6653                         if (ret < 0) {
6654                                 pr_err("add_host k=%d error=%d\n", k, -ret);
6655                                 break;
6656                         }
6657                 }
6658         }
6659         if (sdebug_verbose)
6660                 pr_info("built %d host(s)\n", sdebug_num_hosts);
6661
6662         return 0;
6663
6664 bus_unreg:
6665         bus_unregister(&pseudo_lld_bus);
6666 dev_unreg:
6667         root_device_unregister(pseudo_primary);
6668 free_vm:
6669         sdebug_erase_store(idx, NULL);
6670 free_q_arr:
6671         kfree(sdebug_q_arr);
6672         return ret;
6673 }
6674
6675 static void __exit scsi_debug_exit(void)
6676 {
6677         int k = sdebug_num_hosts;
6678
6679         stop_all_queued();
6680         for (; k; k--)
6681                 sdebug_do_remove_host(true);
6682         free_all_queued();
6683         driver_unregister(&sdebug_driverfs_driver);
6684         bus_unregister(&pseudo_lld_bus);
6685         root_device_unregister(pseudo_primary);
6686
6687         sdebug_erase_all_stores(false);
6688         xa_destroy(per_store_ap);
6689 }
6690
/* device_initcall doubles as module_init when built as a module. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
6693
/*
 * struct device release callback for a pseudo adapter: the final
 * put_device() lands here and frees the embedding sdebug_host_info.
 */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
6701
6702 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6703 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6704 {
6705         if (idx < 0)
6706                 return;
6707         if (!sip) {
6708                 if (xa_empty(per_store_ap))
6709                         return;
6710                 sip = xa_load(per_store_ap, idx);
6711                 if (!sip)
6712                         return;
6713         }
6714         vfree(sip->map_storep);
6715         vfree(sip->dif_storep);
6716         vfree(sip->storep);
6717         xa_erase(per_store_ap, idx);
6718         kfree(sip);
6719 }
6720
6721 /* Assume apart_from_first==false only in shutdown case. */
6722 static void sdebug_erase_all_stores(bool apart_from_first)
6723 {
6724         unsigned long idx;
6725         struct sdeb_store_info *sip = NULL;
6726
6727         xa_for_each(per_store_ap, idx, sip) {
6728                 if (apart_from_first)
6729                         apart_from_first = false;
6730                 else
6731                         sdebug_erase_store(idx, sip);
6732         }
6733         if (apart_from_first)
6734                 sdeb_most_recent_idx = sdeb_first_idx;
6735 }
6736
/*
 * Allocate a new backing store (ramdisk plus optional PI and LBP buffers)
 * and publish it in the per_store_ap xarray.
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	/*
	 * NOTE(review): xa_limit.max is inclusive, so indices 0..65536
	 * (65537 slots) are permitted -- one more than the comment above
	 * suggests; confirm intent.
	 */
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	/* Holding the xarray spinlock with IRQs off: must not sleep, so
	 * the slot allocation uses GFP_ATOMIC. */
	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);	/* zero-filled user-data ramdisk */
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		/* 0xff pattern marks all PI tuples as not-yet-written */
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	/* Frees sip and any buffers, and erases the xarray slot. */
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
6819
6820 static int sdebug_add_host_helper(int per_host_idx)
6821 {
6822         int k, devs_per_host, idx;
6823         int error = -ENOMEM;
6824         struct sdebug_host_info *sdbg_host;
6825         struct sdebug_dev_info *sdbg_devinfo, *tmp;
6826
6827         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
6828         if (!sdbg_host)
6829                 return -ENOMEM;
6830         idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
6831         if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
6832                 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6833         sdbg_host->si_idx = idx;
6834
6835         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
6836
6837         devs_per_host = sdebug_num_tgts * sdebug_max_luns;
6838         for (k = 0; k < devs_per_host; k++) {
6839                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
6840                 if (!sdbg_devinfo)
6841                         goto clean;
6842         }
6843
6844         spin_lock(&sdebug_host_list_lock);
6845         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
6846         spin_unlock(&sdebug_host_list_lock);
6847
6848         sdbg_host->dev.bus = &pseudo_lld_bus;
6849         sdbg_host->dev.parent = pseudo_primary;
6850         sdbg_host->dev.release = &sdebug_release_adapter;
6851         dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
6852
6853         error = device_register(&sdbg_host->dev);
6854         if (error)
6855                 goto clean;
6856
6857         ++sdebug_num_hosts;
6858         return 0;
6859
6860 clean:
6861         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
6862                                  dev_list) {
6863                 list_del(&sdbg_devinfo->dev_list);
6864                 kfree(sdbg_devinfo->zstate);
6865                 kfree(sdbg_devinfo);
6866         }
6867         kfree(sdbg_host);
6868         pr_warn("%s: failed, errno=%d\n", __func__, -error);
6869         return error;
6870 }
6871
6872 static int sdebug_do_add_host(bool mk_new_store)
6873 {
6874         int ph_idx = sdeb_most_recent_idx;
6875
6876         if (mk_new_store) {
6877                 ph_idx = sdebug_add_store();
6878                 if (ph_idx < 0)
6879                         return ph_idx;
6880         }
6881         return sdebug_add_host_helper(ph_idx);
6882 }
6883
/*
 * Remove the most recently added host (tail of sdebug_host_list).
 * When the_end is false (normal hot removal) the host's backing-store index
 * is marked SDEB_XA_NOT_IN_USE if no surviving host still references it;
 * at module exit (the_end == true) that bookkeeping is skipped.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* LIFO: operate on the last host that was added */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* Does any other host still share store idx? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	/* Outside the spinlock: device_unregister() may sleep. */
	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
6923
6924 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
6925 {
6926         int num_in_q = 0;
6927         struct sdebug_dev_info *devip;
6928
6929         block_unblock_all_queues(true);
6930         devip = (struct sdebug_dev_info *)sdev->hostdata;
6931         if (NULL == devip) {
6932                 block_unblock_all_queues(false);
6933                 return  -ENODEV;
6934         }
6935         num_in_q = atomic_read(&devip->num_in_q);
6936
6937         if (qdepth < 1)
6938                 qdepth = 1;
6939         /* allow to exceed max host qc_arr elements for testing */
6940         if (qdepth > SDEBUG_CANQUEUE + 10)
6941                 qdepth = SDEBUG_CANQUEUE + 10;
6942         scsi_change_queue_depth(sdev, qdepth);
6943
6944         if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
6945                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
6946                             __func__, qdepth, num_in_q);
6947         }
6948         block_unblock_all_queues(false);
6949         return sdev->queue_depth;
6950 }
6951
6952 static bool fake_timeout(struct scsi_cmnd *scp)
6953 {
6954         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
6955                 if (sdebug_every_nth < -1)
6956                         sdebug_every_nth = -1;
6957                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
6958                         return true; /* ignore command causing timeout */
6959                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
6960                          scsi_medium_access_command(scp))
6961                         return true; /* time out reads and writes */
6962         }
6963         return false;
6964 }
6965
6966 static bool fake_host_busy(struct scsi_cmnd *scp)
6967 {
6968         return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
6969                 (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6970 }
6971
/*
 * Main .queuecommand entry point: decode the CDB against opcode_info_arr,
 * run unit-attention / strict-CDB / not-ready checks, then hand the chosen
 * resp_* handler to schedule_resp() with an appropriate delay.
 * Returns 0 or SCSI_MLQUEUE_HOST_BUSY (per queuecommand convention).
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;

	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* Optionally hex-dump the CDB (up to 32 bytes) when verbose. */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	/* Out-of-range LUN (other than the REPORT LUNS well-known LUN). */
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* disambiguate via the service action field */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no attached entry matched */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* Only a whitelisted subset of opcodes is valid on the wlun. */
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find highest offending bit for sense data */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* Pending unit attention? (skipped for F_SKIP_UA commands) */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	/* Medium-access command while the unit is stopped -> NOT READY. */
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;	/* succeed without touching the store */
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
7133
/*
 * SCSI host template shared by all pseudo adapters. can_queue and
 * dma_boundary are adjusted at probe time from module parameters.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,	/* no limit */
	.max_segment_size =	-1U,	/* no limit */
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
7160
/*
 * Pseudo-bus probe: allocate and configure a Scsi_Host for one pseudo
 * adapter, set DIF/DIX protection capabilities from the sdebug_dif /
 * sdebug_dix module parameters, then add and scan the host.
 * Returns 0 on success, -ENODEV on failure.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	/* hostdata stores only a back-pointer to sdbg_host, hence
	 * sizeof(sdbg_host) (pointer size), not sizeof(*sdbg_host). */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	/* leave room above this_id so the initiator ID is not scanned */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* Map the dif module parameter onto SHOST_DIF/DIX capability bits. */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
7258
/*
 * Pseudo-bus remove: detach the Scsi_Host, free all fake device infos
 * attached to this host, then drop the host reference.
 * Returns 0, or -ENODEV if no host info is attached (defensive check).
 */
static int sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		pr_err("Unable to locate host info\n");
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	/* Free every per-device state (including ZBC zone state). */
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}
7283
/* Pseudo-bus match: every device on the bus matches every driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
7289
/* Pseudo bus hosting the fake adapters; probe/remove build/tear down
 * the corresponding Scsi_Host. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};