drivers/scsi/scsi_debug.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2018 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
13  */
14
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18 #include <linux/module.h>
19
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/unaligned.h>
46
47 #include <scsi/scsi.h>
48 #include <scsi/scsi_cmnd.h>
49 #include <scsi/scsi_device.h>
50 #include <scsi/scsi_host.h>
51 #include <scsi/scsicam.h>
52 #include <scsi/scsi_eh.h>
53 #include <scsi/scsi_tcq.h>
54 #include <scsi/scsi_dbg.h>
55
56 #include "sd.h"
57 #include "scsi_logging.h"
58
59 /* make sure inq_product_rev string corresponds to this version */
60 #define SDEBUG_VERSION "0188"   /* format to fit INQUIRY revision field */
61 static const char *sdebug_version_date = "20190125";
62
63 #define MY_NAME "scsi_debug"
64
65 /* Additional Sense Code (ASC) */
66 #define NO_ADDITIONAL_SENSE 0x0
67 #define LOGICAL_UNIT_NOT_READY 0x4
68 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
69 #define UNRECOVERED_READ_ERR 0x11
70 #define PARAMETER_LIST_LENGTH_ERR 0x1a
71 #define INVALID_OPCODE 0x20
72 #define LBA_OUT_OF_RANGE 0x21
73 #define INVALID_FIELD_IN_CDB 0x24
74 #define INVALID_FIELD_IN_PARAM_LIST 0x26
75 #define WRITE_PROTECTED 0x27
76 #define UA_RESET_ASC 0x29
77 #define UA_CHANGED_ASC 0x2a
78 #define TARGET_CHANGED_ASC 0x3f
79 #define LUNS_CHANGED_ASCQ 0x0e
80 #define INSUFF_RES_ASC 0x55
81 #define INSUFF_RES_ASCQ 0x3
82 #define POWER_ON_RESET_ASCQ 0x0
83 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
84 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
85 #define CAPACITY_CHANGED_ASCQ 0x9
86 #define SAVING_PARAMS_UNSUP 0x39
87 #define TRANSPORT_PROBLEM 0x4b
88 #define THRESHOLD_EXCEEDED 0x5d
89 #define LOW_POWER_COND_ON 0x5e
90 #define MISCOMPARE_VERIFY_ASC 0x1d
91 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
92 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
93 #define WRITE_ERROR_ASC 0xc
94
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
97
98 /* Default values for driver parameters */
99 #define DEF_NUM_HOST   1
100 #define DEF_NUM_TGTS   1
101 #define DEF_MAX_LUNS   1
102 /* With these defaults, this driver will make 1 host with 1 target
103  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
104  */
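/* Illustrative only: these defaults can be overridden at load time with
 * module parameters named after the sdebug_* variables below, e.g.
 *      modprobe scsi_debug num_tgts=2 max_luns=4 dev_size_mb=16
 * which would give 1 host with 2 targets of 4 LUNs each (8 devices),
 * with the shared ram store sized at 16 MiB.
 */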
105 #define DEF_ATO 1
106 #define DEF_CDB_LEN 10
107 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB   8
109 #define DEF_DIF 0
110 #define DEF_DIX 0
111 #define DEF_D_SENSE   0
112 #define DEF_EVERY_NTH   0
113 #define DEF_FAKE_RW     0
114 #define DEF_GUARD 0
115 #define DEF_HOST_LOCK 0
116 #define DEF_LBPU 0
117 #define DEF_LBPWS 0
118 #define DEF_LBPWS10 0
119 #define DEF_LBPRZ 1
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0   0
123 #define DEF_NUM_PARTS   0
124 #define DEF_OPTS   0
125 #define DEF_OPT_BLKS 1024
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_OPT_XFERLEN_EXP 0
128 #define DEF_PTYPE   TYPE_DISK
129 #define DEF_REMOVABLE false
130 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
131 #define DEF_SECTOR_SIZE 512
132 #define DEF_UNMAP_ALIGNMENT 0
133 #define DEF_UNMAP_GRANULARITY 1
134 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
135 #define DEF_UNMAP_MAX_DESC 256
136 #define DEF_VIRTUAL_GB   0
137 #define DEF_VPD_USE_HOSTNO 1
138 #define DEF_WRITESAME_LENGTH 0xFFFF
139 #define DEF_STRICT 0
140 #define DEF_STATISTICS false
141 #define DEF_SUBMIT_QUEUES 1
142 #define DEF_UUID_CTL 0
143 #define JDELAY_OVERRIDDEN -9999
144
145 #define SDEBUG_LUN_0_VAL 0
146
147 /* bit mask values for sdebug_opts */
148 #define SDEBUG_OPT_NOISE                1
149 #define SDEBUG_OPT_MEDIUM_ERR           2
150 #define SDEBUG_OPT_TIMEOUT              4
151 #define SDEBUG_OPT_RECOVERED_ERR        8
152 #define SDEBUG_OPT_TRANSPORT_ERR        16
153 #define SDEBUG_OPT_DIF_ERR              32
154 #define SDEBUG_OPT_DIX_ERR              64
155 #define SDEBUG_OPT_MAC_TIMEOUT          128
156 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
157 #define SDEBUG_OPT_Q_NOISE              0x200
158 #define SDEBUG_OPT_ALL_TSF              0x400
159 #define SDEBUG_OPT_RARE_TSF             0x800
160 #define SDEBUG_OPT_N_WCE                0x1000
161 #define SDEBUG_OPT_RESET_NOISE          0x2000
162 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
163 #define SDEBUG_OPT_HOST_BUSY            0x8000
164 #define SDEBUG_OPT_CMD_ABORT            0x10000
165 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
166                               SDEBUG_OPT_RESET_NOISE)
167 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
168                                   SDEBUG_OPT_TRANSPORT_ERR | \
169                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
170                                   SDEBUG_OPT_SHORT_TRANSFER | \
171                                   SDEBUG_OPT_HOST_BUSY | \
172                                   SDEBUG_OPT_CMD_ABORT)
173 /* When "every_nth" > 0 then modulo "every_nth" commands:
174  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
175  *   - a RECOVERED_ERROR is simulated on successful read and write
176  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
177  *   - a TRANSPORT_ERROR is simulated on successful read and write
178  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
179  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
180  *     CMD_ABORT
181  *
182  * When "every_nth" < 0 then after "- every_nth" commands the selected
183  * error will be injected. The error will be injected on every subsequent
184  * command until some other action occurs; for example, the user writing
185  * a new value (other than -1 or 1) to every_nth:
186  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
187  */
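/* Usage sketch (every_nth path as above; "opts" is this driver's sysfs
 * attribute for sdebug_opts):
 *      echo 8 > /sys/bus/pseudo/drivers/scsi_debug/opts      # RECOVERED_ERR
 *      echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 * should then report a RECOVERED ERROR on every 100th read/write command.
 */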
188
189 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
190  * priority order. In the subset implemented here lower numbers have higher
191  * priority. The UA numbers should be a sequence starting from 0 with
192  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
193 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
194 #define SDEBUG_UA_BUS_RESET 1
195 #define SDEBUG_UA_MODE_CHANGED 2
196 #define SDEBUG_UA_CAPACITY_CHANGED 3
197 #define SDEBUG_UA_LUNS_CHANGED 4
198 #define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
199 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
200 #define SDEBUG_NUM_UAS 7
201
202 /* when the SDEBUG_OPT_MEDIUM_ERR option is set, a medium error is simulated
203  * at this sector on read commands: */
204 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
205 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
206
207 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
208  * or "peripheral device" addressing (value 0) */
209 #define SAM2_LUN_ADDRESS_METHOD 0
210
211 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
212  * (for response) per submit queue at one time. Can be reduced by max_queue
213  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
214  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
215  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
216  * but cannot exceed SDEBUG_CANQUEUE .
217  */
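/* e.g. (illustrative <h:c:t:l> value):
 *      echo 64 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 */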
218 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD here is BITS_PER_LONG bits */
219 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
220 #define DEF_CMD_PER_LUN  255
221
222 #define F_D_IN                  1
223 #define F_D_OUT                 2
224 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
225 #define F_D_UNKN                8
226 #define F_RL_WLUN_OK            0x10
227 #define F_SKIP_UA               0x20
228 #define F_DELAY_OVERR           0x40
229 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
230 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
231 #define F_INV_OP                0x200
232 #define F_FAKE_RW               0x400
233 #define F_M_ACCESS              0x800   /* media access */
234 #define F_SSU_DELAY             0x1000
235 #define F_SYNC_DELAY            0x2000
236
237 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
238 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
239 #define FF_SA (F_SA_HIGH | F_SA_LOW)
240 #define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
241
242 #define SDEBUG_MAX_PARTS 4
243
244 #define SDEBUG_MAX_CMD_LEN 32
245
246
247 struct sdebug_dev_info {
248         struct list_head dev_list;
249         unsigned int channel;
250         unsigned int target;
251         u64 lun;
252         uuid_t lu_name;
253         struct sdebug_host_info *sdbg_host;
254         unsigned long uas_bm[1];
255         atomic_t num_in_q;
256         atomic_t stopped;
257         bool used;
258 };
259
260 struct sdebug_host_info {
261         struct list_head host_list;
262         struct Scsi_Host *shost;
263         struct device dev;
264         struct list_head dev_info_list;
265 };
266
267 #define to_sdebug_host(d)       \
268         container_of(d, struct sdebug_host_info, dev)
269
270 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
271                       SDEB_DEFER_WQ = 2};
272
273 struct sdebug_defer {
274         struct hrtimer hrt;
275         struct execute_work ew;
276         int sqa_idx;    /* index of sdebug_queue array */
277         int qc_idx;     /* index of sdebug_queued_cmd array within sqa_idx */
278         int issuing_cpu;
279         bool init_hrt;
280         bool init_wq;
281         bool aborted;   /* true when blk_abort_request() already called */
282         enum sdeb_defer_type defer_t;
283 };
284
285 struct sdebug_queued_cmd {
286         /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
287          * instance indicates this slot is in use.
288          */
289         struct sdebug_defer *sd_dp;
290         struct scsi_cmnd *a_cmnd;
291         unsigned int inj_recovered:1;
292         unsigned int inj_transport:1;
293         unsigned int inj_dif:1;
294         unsigned int inj_dix:1;
295         unsigned int inj_short:1;
296         unsigned int inj_host_busy:1;
297         unsigned int inj_cmd_abort:1;
298 };
299
300 struct sdebug_queue {
301         struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
302         unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
303         spinlock_t qc_lock;
304         atomic_t blocked;       /* to temporarily stop more being queued */
305 };
306
307 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
308 static atomic_t sdebug_completions;  /* count of deferred completions */
309 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
310 static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
311
312 struct opcode_info_t {
313         u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff */
314                                 /* for terminating element */
315         u8 opcode;              /* if num_attached > 0, preferred */
316         u16 sa;                 /* service action */
317         u32 flags;              /* OR-ed set of SDEB_F_* */
318         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
319         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
320         u8 len_mask[16];        /* len_mask[0]-->cdb_len, then mask for cdb */
321                                 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
322 };
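/* Worked reading of a len_mask entry (taken from opcode_info_arr below):
 * the INQUIRY row {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, ...} declares a 6 byte
 * cdb in which cdb[1] may only use bits within 0xe3, cdb[2]..cdb[4] are
 * unrestricted (0xff) and the control byte cdb[5] is masked with 0xc7;
 * the masks are used (when the strict parameter is set) to flag reserved
 * cdb bits.
 */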
323
324 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
325 enum sdeb_opcode_index {
326         SDEB_I_INVALID_OPCODE = 0,
327         SDEB_I_INQUIRY = 1,
328         SDEB_I_REPORT_LUNS = 2,
329         SDEB_I_REQUEST_SENSE = 3,
330         SDEB_I_TEST_UNIT_READY = 4,
331         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
332         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
333         SDEB_I_LOG_SENSE = 7,
334         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
335         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
336         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
337         SDEB_I_START_STOP = 11,
338         SDEB_I_SERV_ACT_IN_16 = 12,     /* add ...SERV_ACT_IN_12 if needed */
339         SDEB_I_SERV_ACT_OUT_16 = 13,    /* add ...SERV_ACT_OUT_12 if needed */
340         SDEB_I_MAINT_IN = 14,
341         SDEB_I_MAINT_OUT = 15,
342         SDEB_I_VERIFY = 16,             /* 10 only */
343         SDEB_I_VARIABLE_LEN = 17,       /* READ(32), WRITE(32), WR_SCAT(32) */
344         SDEB_I_RESERVE = 18,            /* 6, 10 */
345         SDEB_I_RELEASE = 19,            /* 6, 10 */
346         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
347         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
348         SDEB_I_ATA_PT = 22,             /* 12, 16 */
349         SDEB_I_SEND_DIAG = 23,
350         SDEB_I_UNMAP = 24,
351         SDEB_I_WRITE_BUFFER = 25,
352         SDEB_I_WRITE_SAME = 26,         /* 10, 16 */
353         SDEB_I_SYNC_CACHE = 27,         /* 10, 16 */
354         SDEB_I_COMP_WRITE = 28,
355         SDEB_I_LAST_ELEMENT = 29,       /* keep this last (previous + 1) */
356 };
357
358
359 static const unsigned char opcode_ind_arr[256] = {
360 /* 0x0; 0x0->0x1f: 6 byte cdbs */
361         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
362             0, 0, 0, 0,
363         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
364         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
365             SDEB_I_RELEASE,
366         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
367             SDEB_I_ALLOW_REMOVAL, 0,
368 /* 0x20; 0x20->0x3f: 10 byte cdbs */
369         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
370         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
371         0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
372         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
373 /* 0x40; 0x40->0x5f: 10 byte cdbs */
374         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
375         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
376         0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
377             SDEB_I_RELEASE,
378         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
379 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
380         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
381         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
382         0, SDEB_I_VARIABLE_LEN,
383 /* 0x80; 0x80->0x9f: 16 byte cdbs */
384         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
385         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
386         0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
387         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
388 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
389         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
390              SDEB_I_MAINT_OUT, 0, 0, 0,
391         SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
392              0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
393         0, 0, 0, 0, 0, 0, 0, 0,
394         0, 0, 0, 0, 0, 0, 0, 0,
395 /* 0xc0; 0xc0->0xff: vendor specific */
396         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
397         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
398         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
399         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
400 };
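/* e.g. cdb[0]==0x28 (READ(10)) indexes to SDEB_I_READ and 0x2a (WRITE(10))
 * to SDEB_I_WRITE; a zero entry means SDEB_I_INVALID_OPCODE.
 */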
401
402 /*
403  * The following "response" functions return the SCSI mid-level's 4 byte
404  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
405  * command completion, they can mask their return value with
406  * SDEG_RES_IMMED_MASK .
407  */
408 #define SDEG_RES_IMMED_MASK 0x40000000
409
410 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
411 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
412 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
413 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
414 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
415 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
416 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
417 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
418 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
419 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
420 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
421 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
422 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
423 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
424 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
425 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
426 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
427 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
428 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
429 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
430 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
431 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
432
433 /*
434  * The following are overflow arrays for cdbs that "hit" the same index in
435  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
436  * should be placed in opcode_info_arr[], the others should be placed here.
437  */
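/* For example, READ(16) (opcode 0x88) sits in opcode_info_arr[SDEB_I_READ]
 * while READ(10), READ(6) and READ(12) are in read_iarr[] below.
 */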
438 static const struct opcode_info_t msense_iarr[] = {
439         {0, 0x1a, 0, F_D_IN, NULL, NULL,
440             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
441 };
442
443 static const struct opcode_info_t mselect_iarr[] = {
444         {0, 0x15, 0, F_D_OUT, NULL, NULL,
445             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
446 };
447
448 static const struct opcode_info_t read_iarr[] = {
449         {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
450             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
451              0, 0, 0, 0} },
452         {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
453             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
454         {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
455             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
456              0xc7, 0, 0, 0, 0} },
457 };
458
459 static const struct opcode_info_t write_iarr[] = {
460         {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
461             NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
462                    0, 0, 0, 0, 0, 0} },
463         {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
464             NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
465                    0, 0, 0} },
466         {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
467             NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
468                    0xbf, 0xc7, 0, 0, 0, 0} },
469 };
470
471 static const struct opcode_info_t sa_in_16_iarr[] = {
472         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
473             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
474              0xff, 0xff, 0xff, 0, 0xc7} },      /* GET LBA STATUS(16) */
475 };
476
477 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
478         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
479             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
480                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
481         {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
482             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
483                    0, 0xff, 0xff, 0x0, 0x0} },  /* WRITE SCATTERED(32) */
484 };
485
486 static const struct opcode_info_t maint_in_iarr[] = {   /* MAINT IN */
487         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
488             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
489              0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
490         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
491             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
492              0, 0} },   /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
493 };
494
495 static const struct opcode_info_t write_same_iarr[] = {
496         {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
497             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
498              0xff, 0xff, 0xff, 0x3f, 0xc7} },           /* WRITE SAME(16) */
499 };
500
501 static const struct opcode_info_t reserve_iarr[] = {
502         {0, 0x16, 0, F_D_OUT, NULL, NULL,               /* RESERVE(6) */
503             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
504 };
505
506 static const struct opcode_info_t release_iarr[] = {
507         {0, 0x17, 0, F_D_OUT, NULL, NULL,               /* RELEASE(6) */
508             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
509 };
510
511 static const struct opcode_info_t sync_cache_iarr[] = {
512         {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
513             {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
514              0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },     /* SYNC_CACHE (16) */
515 };
516
517
518 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
519  * plus the terminating elements for logic that scans this table such as
520  * REPORT SUPPORTED OPERATION CODES. */
521 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
522 /* 0 */
523         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,    /* unknown opcodes */
524             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
526             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
527         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
528             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
529              0, 0} },                                   /* REPORT LUNS */
530         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
531             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
532         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
533             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
534 /* 5 */
535         {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,      /* MODE SENSE(10) */
536             resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
537                 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
538         {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,    /* MODE SELECT(10) */
539             resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
540                 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
541         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,      /* LOG SENSE */
542             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
543              0, 0, 0} },
544         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
545             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
546              0, 0} },
547         {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
548             resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
549             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
550 /* 10 */
551         {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
552             resp_write_dt0, write_iarr,                 /* WRITE(16) */
553                 {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
554                  0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
555         {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
556             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
557         {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
558             resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
559                 {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
560                  0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
561         {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
562             NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
563             0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
564         {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
565             resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
566                 maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
567                                 0xff, 0, 0xc7, 0, 0, 0, 0} },
568 /* 15 */
569         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
570             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
571         {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
572             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
573              0, 0, 0, 0, 0, 0} },
574         {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
575             resp_read_dt0, vl_iarr,     /* VARIABLE LENGTH, READ(32) */
576             {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
577              0xff, 0xff} },
578         {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
579             NULL, reserve_iarr, /* RESERVE(10) <no response function> */
580             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
581              0} },
582         {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
583             NULL, release_iarr, /* RELEASE(10) <no response function> */
584             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
585              0} },
586 /* 20 */
587         {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
588             {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
589         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
590             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
591         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
592             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
593         {0, 0x1d, 0, F_D_OUT, NULL, NULL,       /* SEND DIAGNOSTIC */
594             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
595         {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
596             {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
597 /* 25 */
598         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
599             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
600              0, 0, 0, 0} },                     /* WRITE_BUFFER */
601         {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
602             resp_write_same_10, write_same_iarr,        /* WRITE SAME(10) */
603                 {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
604                  0, 0, 0, 0, 0} },
605         {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
606             resp_sync_cache, sync_cache_iarr,
607             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
608              0, 0, 0, 0} },                     /* SYNC_CACHE (10) */
609         {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
610             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
611              0, 0xff, 0x3f, 0xc7} },            /* COMPARE AND WRITE */
612
613 /* 29 */
614         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
615             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
616 };
617
618 static int sdebug_add_host = DEF_NUM_HOST;
619 static int sdebug_ato = DEF_ATO;
620 static int sdebug_cdb_len = DEF_CDB_LEN;
621 static int sdebug_jdelay = DEF_JDELAY;  /* if > 0 then unit is jiffies */
622 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
623 static int sdebug_dif = DEF_DIF;
624 static int sdebug_dix = DEF_DIX;
625 static int sdebug_dsense = DEF_D_SENSE;
626 static int sdebug_every_nth = DEF_EVERY_NTH;
627 static int sdebug_fake_rw = DEF_FAKE_RW;
628 static unsigned int sdebug_guard = DEF_GUARD;
629 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
630 static int sdebug_max_luns = DEF_MAX_LUNS;
631 static int sdebug_max_queue = SDEBUG_CANQUEUE;  /* per submit queue */
632 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
633 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
634 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
635 static int sdebug_ndelay = DEF_NDELAY;  /* if > 0 then unit is nanoseconds */
636 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
637 static int sdebug_no_uld;
638 static int sdebug_num_parts = DEF_NUM_PARTS;
639 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
640 static int sdebug_opt_blks = DEF_OPT_BLKS;
641 static int sdebug_opts = DEF_OPTS;
642 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
643 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
644 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
645 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
646 static int sdebug_sector_size = DEF_SECTOR_SIZE;
647 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
648 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
649 static unsigned int sdebug_lbpu = DEF_LBPU;
650 static unsigned int sdebug_lbpws = DEF_LBPWS;
651 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
652 static unsigned int sdebug_lbprz = DEF_LBPRZ;
653 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
654 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
655 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
656 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
657 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
658 static int sdebug_uuid_ctl = DEF_UUID_CTL;
659 static bool sdebug_removable = DEF_REMOVABLE;
660 static bool sdebug_clustering;
661 static bool sdebug_host_lock = DEF_HOST_LOCK;
662 static bool sdebug_strict = DEF_STRICT;
663 static bool sdebug_any_injecting_opt;
664 static bool sdebug_verbose;
665 static bool have_dif_prot;
666 static bool write_since_sync;
667 static bool sdebug_statistics = DEF_STATISTICS;
668 static bool sdebug_wp;
669
670 static unsigned int sdebug_store_sectors;
671 static sector_t sdebug_capacity;        /* in sectors */
672
673 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
674    may still need them */
675 static int sdebug_heads;                /* heads per disk */
676 static int sdebug_cylinders_per;        /* cylinders per surface */
677 static int sdebug_sectors_per;          /* sectors per cylinder */
678
679 static LIST_HEAD(sdebug_host_list);
680 static DEFINE_SPINLOCK(sdebug_host_list_lock);
681
682 static unsigned char *fake_storep;      /* ramdisk storage */
683 static struct t10_pi_tuple *dif_storep; /* protection info */
684 static void *map_storep;                /* provisioning map */
685
686 static unsigned long map_size;
687 static int num_aborts;
688 static int num_dev_resets;
689 static int num_target_resets;
690 static int num_bus_resets;
691 static int num_host_resets;
692 static int dix_writes;
693 static int dix_reads;
694 static int dif_errors;
695
696 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
697 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
698
699 static DEFINE_RWLOCK(atomic_rw);
700
701 static char sdebug_proc_name[] = MY_NAME;
702 static const char *my_name = MY_NAME;
703
704 static struct bus_type pseudo_lld_bus;
705
706 static struct device_driver sdebug_driverfs_driver = {
707         .name           = sdebug_proc_name,
708         .bus            = &pseudo_lld_bus,
709 };
710
711 static const int check_condition_result =
712                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
713
714 static const int illegal_condition_result =
715         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
716
717 static const int device_qfull_result =
718         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
719
720
721 /* Only do the extra work involved in logical block provisioning if one or
722  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
723  * real reads and writes (i.e. not skipping them for speed).
724  */
725 static inline bool scsi_debug_lbp(void)
726 {
727         return 0 == sdebug_fake_rw &&
728                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
729 }
730
731 static void *lba2fake_store(unsigned long long lba)
732 {
733         lba = do_div(lba, sdebug_store_sectors);
734
735         return fake_storep + lba * sdebug_sector_size;
736 }
737
738 static struct t10_pi_tuple *dif_store(sector_t sector)
739 {
740         sector = sector_div(sector, sdebug_store_sectors);
741
742         return dif_storep + sector;
743 }
744
745 static void sdebug_max_tgts_luns(void)
746 {
747         struct sdebug_host_info *sdbg_host;
748         struct Scsi_Host *hpnt;
749
750         spin_lock(&sdebug_host_list_lock);
751         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
752                 hpnt = sdbg_host->shost;
753                 if ((hpnt->this_id >= 0) &&
754                     (sdebug_num_tgts > hpnt->this_id))
755                         hpnt->max_id = sdebug_num_tgts + 1;
756                 else
757                         hpnt->max_id = sdebug_num_tgts;
758                 /* sdebug_max_luns; */
759                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
760         }
761         spin_unlock(&sdebug_host_list_lock);
762 }
763
764 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
765
766 /* Set in_bit to -1 to indicate no bit position of invalid field */
767 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
768                                  enum sdeb_cmd_data c_d,
769                                  int in_byte, int in_bit)
770 {
771         unsigned char *sbuff;
772         u8 sks[4];
773         int sl, asc;
774
775         sbuff = scp->sense_buffer;
776         if (!sbuff) {
777                 sdev_printk(KERN_ERR, scp->device,
778                             "%s: sense_buffer is NULL\n", __func__);
779                 return;
780         }
781         asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
782         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
783         scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
784         memset(sks, 0, sizeof(sks));
785         sks[0] = 0x80;
786         if (c_d)
787                 sks[0] |= 0x40;
788         if (in_bit >= 0) {
789                 sks[0] |= 0x8;
790                 sks[0] |= 0x7 & in_bit;
791         }
792         put_unaligned_be16(in_byte, sks + 1);
793         if (sdebug_dsense) {
794                 sl = sbuff[7] + 8;
795                 sbuff[7] = sl;
796                 sbuff[sl] = 0x2;
797                 sbuff[sl + 1] = 0x6;
798                 memcpy(sbuff + sl + 4, sks, 3);
799         } else
800                 memcpy(sbuff + 15, sks, 3);
801         if (sdebug_verbose)
802                 sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
803                             "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
804                             my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
805 }
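/* Worked example (values follow directly from the code above): calling
 * mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 2) yields sense-key-specific
 * bytes 0xca 0x00 0x01, i.e. SKSV=1, C/D=1 (error is in the cdb), BPV=1,
 * bit pointer 2, field pointer 0x0001.
 */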
806
807 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
808 {
809         unsigned char *sbuff;
810
811         sbuff = scp->sense_buffer;
812         if (!sbuff) {
813                 sdev_printk(KERN_ERR, scp->device,
814                             "%s: sense_buffer is NULL\n", __func__);
815                 return;
816         }
817         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
818
819         scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
820
821         if (sdebug_verbose)
822                 sdev_printk(KERN_INFO, scp->device,
823                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
824                             my_name, key, asc, asq);
825 }
826
827 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
828 {
829         mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
830 }
831
832 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
833                             void __user *arg)
834 {
835         if (sdebug_verbose) {
836                 if (0x1261 == cmd)
837                         sdev_printk(KERN_INFO, dev,
838                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
839                 else if (0x5331 == cmd)
840                         sdev_printk(KERN_INFO, dev,
841                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
842                                     __func__);
843                 else
844                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
845                                     __func__, cmd);
846         }
847         return -EINVAL;
848         /* return -ENOTTY; // correct return but upsets fdisk */
849 }
850
851 static void config_cdb_len(struct scsi_device *sdev)
852 {
853         switch (sdebug_cdb_len) {
854         case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
855                 sdev->use_10_for_rw = false;
856                 sdev->use_16_for_rw = false;
857                 sdev->use_10_for_ms = false;
858                 break;
859         case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
860                 sdev->use_10_for_rw = true;
861                 sdev->use_16_for_rw = false;
862                 sdev->use_10_for_ms = false;
863                 break;
864         case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
865                 sdev->use_10_for_rw = true;
866                 sdev->use_16_for_rw = false;
867                 sdev->use_10_for_ms = true;
868                 break;
869         case 16:
870                 sdev->use_10_for_rw = false;
871                 sdev->use_16_for_rw = true;
872                 sdev->use_10_for_ms = true;
873                 break;
874         case 32: /* No knobs to suggest this so same as 16 for now */
875                 sdev->use_10_for_rw = false;
876                 sdev->use_16_for_rw = true;
877                 sdev->use_10_for_ms = true;
878                 break;
879         default:
880                 pr_warn("unexpected cdb_len=%d, force to 10\n",
881                         sdebug_cdb_len);
882                 sdev->use_10_for_rw = true;
883                 sdev->use_16_for_rw = false;
884                 sdev->use_10_for_ms = false;
885                 sdebug_cdb_len = 10;
886                 break;
887         }
888 }
889
890 static void all_config_cdb_len(void)
891 {
892         struct sdebug_host_info *sdbg_host;
893         struct Scsi_Host *shost;
894         struct scsi_device *sdev;
895
896         spin_lock(&sdebug_host_list_lock);
897         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
898                 shost = sdbg_host->shost;
899                 shost_for_each_device(sdev, shost) {
900                         config_cdb_len(sdev);
901                 }
902         }
903         spin_unlock(&sdebug_host_list_lock);
904 }
905
906 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
907 {
908         struct sdebug_host_info *sdhp;
909         struct sdebug_dev_info *dp;
910
911         spin_lock(&sdebug_host_list_lock);
912         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
913                 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
914                         if ((devip->sdbg_host == dp->sdbg_host) &&
915                             (devip->target == dp->target))
916                                 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
917                 }
918         }
919         spin_unlock(&sdebug_host_list_lock);
920 }
921
922 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
923 {
924         int k;
925
926         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
927         if (k != SDEBUG_NUM_UAS) {
928                 const char *cp = NULL;
929
930                 switch (k) {
931                 case SDEBUG_UA_POR:
932                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
933                                         POWER_ON_RESET_ASCQ);
934                         if (sdebug_verbose)
935                                 cp = "power on reset";
936                         break;
937                 case SDEBUG_UA_BUS_RESET:
938                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
939                                         BUS_RESET_ASCQ);
940                         if (sdebug_verbose)
941                                 cp = "bus reset";
942                         break;
943                 case SDEBUG_UA_MODE_CHANGED:
944                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
945                                         MODE_CHANGED_ASCQ);
946                         if (sdebug_verbose)
947                                 cp = "mode parameters changed";
948                         break;
949                 case SDEBUG_UA_CAPACITY_CHANGED:
950                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
951                                         CAPACITY_CHANGED_ASCQ);
952                         if (sdebug_verbose)
953                                 cp = "capacity data changed";
954                         break;
955                 case SDEBUG_UA_MICROCODE_CHANGED:
956                         mk_sense_buffer(scp, UNIT_ATTENTION,
957                                         TARGET_CHANGED_ASC,
958                                         MICROCODE_CHANGED_ASCQ);
959                         if (sdebug_verbose)
960                                 cp = "microcode has been changed";
961                         break;
962                 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
963                         mk_sense_buffer(scp, UNIT_ATTENTION,
964                                         TARGET_CHANGED_ASC,
965                                         MICROCODE_CHANGED_WO_RESET_ASCQ);
966                         if (sdebug_verbose)
967                                 cp = "microcode has been changed without reset";
968                         break;
969                 case SDEBUG_UA_LUNS_CHANGED:
970                         /*
971                          * SPC-3 behavior is to report a UNIT ATTENTION with
972                          * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
973                          * on the target, until a REPORT LUNS command is
974                          * received.  SPC-4 behavior is to report it only once.
975                          * NOTE:  sdebug_scsi_level does not use the same
976                          * values as struct scsi_device->scsi_level.
977                          */
978                         if (sdebug_scsi_level >= 6)     /* SPC-4 and above */
979                                 clear_luns_changed_on_target(devip);
980                         mk_sense_buffer(scp, UNIT_ATTENTION,
981                                         TARGET_CHANGED_ASC,
982                                         LUNS_CHANGED_ASCQ);
983                         if (sdebug_verbose)
984                                 cp = "reported luns data has changed";
985                         break;
986                 default:
987                         pr_warn("unexpected unit attention code=%d\n", k);
988                         if (sdebug_verbose)
989                                 cp = "unknown";
990                         break;
991                 }
992                 clear_bit(k, devip->uas_bm);
993                 if (sdebug_verbose)
994                         sdev_printk(KERN_INFO, scp->device,
995                                    "%s reports: Unit attention: %s\n",
996                                    my_name, cp);
997                 return check_condition_result;
998         }
999         return 0;
1000 }
1001
1002 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1003 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1004                                 int arr_len)
1005 {
1006         int act_len;
1007         struct scsi_data_buffer *sdb = &scp->sdb;
1008
1009         if (!sdb->length)
1010                 return 0;
1011         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1012                 return DID_ERROR << 16;
1013
1014         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1015                                       arr, arr_len);
1016         scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1017
1018         return 0;
1019 }
1020
1021 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1022  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1023  * calls, not required to write in ascending offset order. Assumes resid
1024  * set to scsi_bufflen() prior to any calls.
1025  */
1026 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1027                                   int arr_len, unsigned int off_dst)
1028 {
1029         unsigned int act_len, n;
1030         struct scsi_data_buffer *sdb = &scp->sdb;
1031         off_t skip = off_dst;
1032
1033         if (sdb->length <= off_dst)
1034                 return 0;
1035         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1036                 return DID_ERROR << 16;
1037
1038         act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1039                                        arr, arr_len, skip);
1040         pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1041                  __func__, off_dst, scsi_bufflen(scp), act_len,
1042                  scsi_get_resid(scp));
1043         n = scsi_bufflen(scp) - (off_dst + act_len);
1044         scsi_set_resid(scp, min(scsi_get_resid(scp), n));
1045         return 0;
1046 }
1047
1048 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1049  * 'arr' or -1 if error.
1050  */
1051 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1052                                int arr_len)
1053 {
1054         if (!scsi_bufflen(scp))
1055                 return 0;
1056         if (scp->sc_data_direction != DMA_TO_DEVICE)
1057                 return -1;
1058
1059         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1060 }
1061
1062
1063 static char sdebug_inq_vendor_id[9] = "Linux   ";
1064 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1065 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1066 /* Use some locally assigned NAAs for SAS addresses. */
1067 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1068 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1069 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1070
1071 /* Device identification VPD page. Returns number of bytes placed in arr */
1072 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1073                           int target_dev_id, int dev_id_num,
1074                           const char *dev_id_str, int dev_id_str_len,
1075                           const uuid_t *lu_name)
1076 {
1077         int num, port_a;
1078         char b[32];
1079
1080         port_a = target_dev_id + 1;
1081         /* T10 vendor identifier field format (faked) */
1082         arr[0] = 0x2;   /* ASCII */
1083         arr[1] = 0x1;
1084         arr[2] = 0x0;
1085         memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1086         memcpy(&arr[12], sdebug_inq_product_id, 16);
1087         memcpy(&arr[28], dev_id_str, dev_id_str_len);
1088         num = 8 + 16 + dev_id_str_len;
1089         arr[3] = num;
1090         num += 4;
1091         if (dev_id_num >= 0) {
1092                 if (sdebug_uuid_ctl) {
1093                         /* Locally assigned UUID */
1094                         arr[num++] = 0x1;  /* binary (not necessarily sas) */
1095                         arr[num++] = 0xa;  /* PIV=0, lu, naa */
1096                         arr[num++] = 0x0;
1097                         arr[num++] = 0x12;
1098                         arr[num++] = 0x10; /* uuid type=1, locally assigned */
1099                         arr[num++] = 0x0;
1100                         memcpy(arr + num, lu_name, 16);
1101                         num += 16;
1102                 } else {
1103                         /* NAA-3, Logical unit identifier (binary) */
1104                         arr[num++] = 0x1;  /* binary (not necessarily sas) */
1105                         arr[num++] = 0x3;  /* PIV=0, lu, naa */
1106                         arr[num++] = 0x0;
1107                         arr[num++] = 0x8;
1108                         put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1109                         num += 8;
1110                 }
1111                 /* Target relative port number */
1112                 arr[num++] = 0x61;      /* proto=sas, binary */
1113                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
1114                 arr[num++] = 0x0;       /* reserved */
1115                 arr[num++] = 0x4;       /* length */
1116                 arr[num++] = 0x0;       /* reserved */
1117                 arr[num++] = 0x0;       /* reserved */
1118                 arr[num++] = 0x0;
1119                 arr[num++] = 0x1;       /* relative port A */
1120         }
1121         /* NAA-3, Target port identifier */
1122         arr[num++] = 0x61;      /* proto=sas, binary */
1123         arr[num++] = 0x93;      /* piv=1, target port, naa */
1124         arr[num++] = 0x0;
1125         arr[num++] = 0x8;
1126         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1127         num += 8;
1128         /* NAA-3, Target port group identifier */
1129         arr[num++] = 0x61;      /* proto=sas, binary */
1130         arr[num++] = 0x95;      /* piv=1, target port group id */
1131         arr[num++] = 0x0;
1132         arr[num++] = 0x4;
1133         arr[num++] = 0;
1134         arr[num++] = 0;
1135         put_unaligned_be16(port_group_id, arr + num);
1136         num += 2;
1137         /* NAA-3, Target device identifier */
1138         arr[num++] = 0x61;      /* proto=sas, binary */
1139         arr[num++] = 0xa3;      /* piv=1, target device, naa */
1140         arr[num++] = 0x0;
1141         arr[num++] = 0x8;
1142         put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1143         num += 8;
1144         /* SCSI name string: Target device identifier */
1145         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
1146         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
1147         arr[num++] = 0x0;
1148         arr[num++] = 24;
1149         memcpy(arr + num, "naa.32222220", 12);
1150         num += 12;
1151         snprintf(b, sizeof(b), "%08X", target_dev_id);
1152         memcpy(arr + num, b, 8);
1153         num += 8;
1154         memset(arr + num, 0, 4);
1155         num += 4;
1156         return num;
1157 }
1158
1159 static unsigned char vpd84_data[] = {
1160 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1161     0x22,0x22,0x22,0x0,0xbb,0x1,
1162     0x22,0x22,0x22,0x0,0xbb,0x2,
1163 };
1164
1165 /*  Software interface identification VPD page */
1166 static int inquiry_vpd_84(unsigned char *arr)
1167 {
1168         memcpy(arr, vpd84_data, sizeof(vpd84_data));
1169         return sizeof(vpd84_data);
1170 }
1171
1172 /* Management network addresses VPD page */
1173 static int inquiry_vpd_85(unsigned char *arr)
1174 {
1175         int num = 0;
1176         const char *na1 = "https://www.kernel.org/config";
1177         const char *na2 = "http://www.kernel.org/log";
1178         int plen, olen;
1179
1180         arr[num++] = 0x1;       /* lu, storage config */
1181         arr[num++] = 0x0;       /* reserved */
1182         arr[num++] = 0x0;
1183         olen = strlen(na1);
1184         plen = olen + 1;
1185         if (plen % 4)
1186                 plen = ((plen / 4) + 1) * 4;
1187         arr[num++] = plen;      /* length, null terminated, padded */
1188         memcpy(arr + num, na1, olen);
1189         memset(arr + num + olen, 0, plen - olen);
1190         num += plen;
1191
1192         arr[num++] = 0x4;       /* lu, logging */
1193         arr[num++] = 0x0;       /* reserved */
1194         arr[num++] = 0x0;
1195         olen = strlen(na2);
1196         plen = olen + 1;
1197         if (plen % 4)
1198                 plen = ((plen / 4) + 1) * 4;
1199         arr[num++] = plen;      /* length, null terminated, padded */
1200         memcpy(arr + num, na2, olen);
1201         memset(arr + num + olen, 0, plen - olen);
1202         num += plen;
1203
1204         return num;
1205 }
1206
1207 /* SCSI ports VPD page */
1208 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1209 {
1210         int num = 0;
1211         int port_a, port_b;
1212
1213         port_a = target_dev_id + 1;
1214         port_b = port_a + 1;
1215         arr[num++] = 0x0;       /* reserved */
1216         arr[num++] = 0x0;       /* reserved */
1217         arr[num++] = 0x0;
1218         arr[num++] = 0x1;       /* relative port 1 (primary) */
1219         memset(arr + num, 0, 6);
1220         num += 6;
1221         arr[num++] = 0x0;
1222         arr[num++] = 12;        /* length tp descriptor */
1223         /* naa-5 target port identifier (A) */
1224         arr[num++] = 0x61;      /* proto=sas, binary */
1225         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1226         arr[num++] = 0x0;       /* reserved */
1227         arr[num++] = 0x8;       /* length */
1228         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1229         num += 8;
1230         arr[num++] = 0x0;       /* reserved */
1231         arr[num++] = 0x0;       /* reserved */
1232         arr[num++] = 0x0;
1233         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1234         memset(arr + num, 0, 6);
1235         num += 6;
1236         arr[num++] = 0x0;
1237         arr[num++] = 12;        /* length tp descriptor */
1238         /* naa-5 target port identifier (B) */
1239         arr[num++] = 0x61;      /* proto=sas, binary */
1240         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1241         arr[num++] = 0x0;       /* reserved */
1242         arr[num++] = 0x8;       /* length */
1243         put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1244         num += 8;
1245
1246         return num;
1247 }
1248
1249
1250 static unsigned char vpd89_data[] = {
1251 /* from 4th byte */ 0,0,0,0,
1252 'l','i','n','u','x',' ',' ',' ',
1253 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1254 '1','2','3','4',
1255 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1256 0xec,0,0,0,
1257 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1258 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1259 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1260 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1261 0x53,0x41,
1262 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1263 0x20,0x20,
1264 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1265 0x10,0x80,
1266 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1267 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1268 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1269 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1270 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1271 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1272 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1273 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1274 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1275 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1276 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1277 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1278 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1279 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1280 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1281 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1282 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1283 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1284 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1285 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1286 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1287 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1288 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1289 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1290 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1291 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1292 };
1293
1294 /* ATA Information VPD page */
1295 static int inquiry_vpd_89(unsigned char *arr)
1296 {
1297         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1298         return sizeof(vpd89_data);
1299 }
1300
1301
1302 static unsigned char vpdb0_data[] = {
1303         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1304         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1305         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1306         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1307 };
1308
1309 /* Block limits VPD page (SBC-3) */
1310 static int inquiry_vpd_b0(unsigned char *arr)
1311 {
1312         unsigned int gran;
1313
1314         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1315
1316         /* Optimal transfer length granularity */
1317         if (sdebug_opt_xferlen_exp != 0 &&
1318             sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1319                 gran = 1 << sdebug_opt_xferlen_exp;
1320         else
1321                 gran = 1 << sdebug_physblk_exp;
1322         put_unaligned_be16(gran, arr + 2);
1323
1324         /* Maximum Transfer Length */
1325         if (sdebug_store_sectors > 0x400)
1326                 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1327
1328         /* Optimal Transfer Length */
1329         put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1330
1331         if (sdebug_lbpu) {
1332                 /* Maximum Unmap LBA Count */
1333                 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1334
1335                 /* Maximum Unmap Block Descriptor Count */
1336                 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1337         }
1338
1339         /* Unmap Granularity Alignment */
1340         if (sdebug_unmap_alignment) {
1341                 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1342                 arr[28] |= 0x80; /* UGAVALID */
1343         }
1344
1345         /* Optimal Unmap Granularity */
1346         put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1347
1348         /* Maximum WRITE SAME Length */
1349         put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1350
1351         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1354 }
1355
1356 /* Block device characteristics VPD page (SBC-3) */
1357 static int inquiry_vpd_b1(unsigned char *arr)
1358 {
1359         memset(arr, 0, 0x3c);
1360         arr[0] = 0;
1361         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1362         arr[2] = 0;
1363         arr[3] = 5;     /* less than 1.8" */
1364
1365         return 0x3c;
1366 }
1367
1368 /* Logical block provisioning VPD page (SBC-4) */
1369 static int inquiry_vpd_b2(unsigned char *arr)
1370 {
1371         memset(arr, 0, 0x4);
1372         arr[0] = 0;                     /* threshold exponent */
1373         if (sdebug_lbpu)
1374                 arr[1] = 1 << 7;
1375         if (sdebug_lbpws)
1376                 arr[1] |= 1 << 6;
1377         if (sdebug_lbpws10)
1378                 arr[1] |= 1 << 5;
1379         if (sdebug_lbprz && scsi_debug_lbp())
1380                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1381         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1382         /* minimum_percentage=0; provisioning_type=0 (unknown) */
1383         /* threshold_percentage=0 */
1384         return 0x4;
1385 }
1386
1387 #define SDEBUG_LONG_INQ_SZ 96
1388 #define SDEBUG_MAX_INQ_ARR_SZ 584
1389
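     /*
      * Build the response to INQUIRY: either the standard data (vendor,
      * product, revision and version descriptors) or, when the EVPD bit is
      * set, one of the VPD pages advertised in page 0x0.
      */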
1390 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1391 {
1392         unsigned char pq_pdt;
1393         unsigned char *arr;
1394         unsigned char *cmd = scp->cmnd;
1395         int alloc_len, n, ret;
1396         bool have_wlun, is_disk;
1397
1398         alloc_len = get_unaligned_be16(cmd + 3);
1399         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1400         if (!arr)
1401                 return DID_REQUEUE << 16;
1402         is_disk = (sdebug_ptype == TYPE_DISK);
1403         have_wlun = scsi_is_wlun(scp->device->lun);
1404         if (have_wlun)
1405                 pq_pdt = TYPE_WLUN;     /* present, wlun */
1406         else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1407                 pq_pdt = 0x7f;  /* not present, PQ=3, PDT=0x1f */
1408         else
1409                 pq_pdt = (sdebug_ptype & 0x1f);
1410         arr[0] = pq_pdt;
1411         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1412                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1413                 kfree(arr);
1414                 return check_condition_result;
1415         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1416                 int lu_id_num, port_group_id, target_dev_id, len;
1417                 char lu_id_str[6];
1418                 int host_no = devip->sdbg_host->shost->host_no;
1419                 
1420                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1421                     (devip->channel & 0x7f);
1422                 if (sdebug_vpd_use_hostno == 0)
1423                         host_no = 0;
1424                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1425                             (devip->target * 1000) + devip->lun);
1426                 target_dev_id = ((host_no + 1) * 2000) +
1427                                  (devip->target * 1000) - 3;
1428                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1429                 if (0 == cmd[2]) { /* supported vital product data pages */
1430                         arr[1] = cmd[2];        /*sanity */
1431                         n = 4;
1432                         arr[n++] = 0x0;   /* this page */
1433                         arr[n++] = 0x80;  /* unit serial number */
1434                         arr[n++] = 0x83;  /* device identification */
1435                         arr[n++] = 0x84;  /* software interface ident. */
1436                         arr[n++] = 0x85;  /* management network addresses */
1437                         arr[n++] = 0x86;  /* extended inquiry */
1438                         arr[n++] = 0x87;  /* mode page policy */
1439                         arr[n++] = 0x88;  /* SCSI ports */
1440                         if (is_disk) {    /* SBC only */
1441                                 arr[n++] = 0x89;  /* ATA information */
1442                                 arr[n++] = 0xb0;  /* Block limits */
1443                                 arr[n++] = 0xb1;  /* Block characteristics */
1444                                 arr[n++] = 0xb2;  /* Logical Block Prov */
1445                         }
1446                         arr[3] = n - 4;   /* number of supported VPD pages */
1447                 } else if (0x80 == cmd[2]) { /* unit serial number */
1448                         arr[1] = cmd[2];        /*sanity */
1449                         arr[3] = len;
1450                         memcpy(&arr[4], lu_id_str, len);
1451                 } else if (0x83 == cmd[2]) { /* device identification */
1452                         arr[1] = cmd[2];        /*sanity */
1453                         arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1454                                                 target_dev_id, lu_id_num,
1455                                                 lu_id_str, len,
1456                                                 &devip->lu_name);
1457                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1458                         arr[1] = cmd[2];        /*sanity */
1459                         arr[3] = inquiry_vpd_84(&arr[4]);
1460                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1461                         arr[1] = cmd[2];        /*sanity */
1462                         arr[3] = inquiry_vpd_85(&arr[4]);
1463                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1464                         arr[1] = cmd[2];        /*sanity */
1465                         arr[3] = 0x3c;  /* number of following entries */
1466                         if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1467                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1468                         else if (have_dif_prot)
1469                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1470                         else
1471                                 arr[4] = 0x0;   /* no protection stuff */
1472                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1473                 } else if (0x87 == cmd[2]) { /* mode page policy */
1474                         arr[1] = cmd[2];        /*sanity */
1475                         arr[3] = 0x8;   /* number of following entries */
1476                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1477                         arr[6] = 0x80;  /* mlus, shared */
1478                         arr[8] = 0x18;   /* protocol specific lu */
1479                         arr[10] = 0x82;  /* mlus, per initiator port */
1480                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1481                         arr[1] = cmd[2];        /*sanity */
1482                         arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1483                 } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
1484                         arr[1] = cmd[2];        /*sanity */
1485                         n = inquiry_vpd_89(&arr[4]);
1486                         put_unaligned_be16(n, arr + 2);
1487                 } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
1488                         arr[1] = cmd[2];        /*sanity */
1489                         arr[3] = inquiry_vpd_b0(&arr[4]);
1490                 } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
1491                         arr[1] = cmd[2];        /*sanity */
1492                         arr[3] = inquiry_vpd_b1(&arr[4]);
1493                 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1494                         arr[1] = cmd[2];        /*sanity */
1495                         arr[3] = inquiry_vpd_b2(&arr[4]);
1496                 } else {
1497                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1498                         kfree(arr);
1499                         return check_condition_result;
1500                 }
1501                 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1502                 ret = fill_from_dev_buffer(scp, arr,
1503                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1504                 kfree(arr);
1505                 return ret;
1506         }
1507         /* drops through here for a standard inquiry */
1508         arr[1] = sdebug_removable ? 0x80 : 0;   /* Removable disk */
1509         arr[2] = sdebug_scsi_level;
1510         arr[3] = 2;    /* response_data_format==2 */
1511         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1512         arr[5] = (int)have_dif_prot;    /* PROTECT bit */
1513         if (sdebug_vpd_use_hostno == 0)
1514                 arr[5] |= 0x10; /* claim: implicit TPGS */
1515         arr[6] = 0x10; /* claim: MultiP */
1516         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1517         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1518         memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1519         memcpy(&arr[16], sdebug_inq_product_id, 16);
1520         memcpy(&arr[32], sdebug_inq_product_rev, 4);
1521         /* Use Vendor Specific area to place driver date in ASCII */
1522         memcpy(&arr[36], sdebug_version_date, 8);
1523         /* version descriptors (2 bytes each) follow */
1524         put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1525         put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1526         n = 62;
1527         if (is_disk) {          /* SBC-4 no version claimed */
1528                 put_unaligned_be16(0x600, arr + n);
1529                 n += 2;
1530         } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1531                 put_unaligned_be16(0x525, arr + n);
1532                 n += 2;
1533         }
1534         put_unaligned_be16(0x2100, arr + n);    /* SPL-4 no version claimed */
1535         ret = fill_from_dev_buffer(scp, arr,
1536                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1537         kfree(arr);
1538         return ret;
1539 }
1540
1541 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1542                                    0, 0, 0x0, 0x0};
1543
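     /*
      * REQUEST SENSE: if the Informational Exceptions mode page has TEST set
      * with MRIE==6, report a fabricated THRESHOLD EXCEEDED; otherwise return
      * the stored sense, converting between fixed and descriptor formats to
      * match the DESC bit in the CDB.
      */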
1544 static int resp_requests(struct scsi_cmnd *scp,
1545                          struct sdebug_dev_info *devip)
1546 {
1547         unsigned char *sbuff;
1548         unsigned char *cmd = scp->cmnd;
1549         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1550         bool dsense;
1551         int len = 18;
1552
1553         memset(arr, 0, sizeof(arr));
1554         dsense = !!(cmd[1] & 1);
1555         sbuff = scp->sense_buffer;
1556         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1557                 if (dsense) {
1558                         arr[0] = 0x72;
1559                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1560                         arr[2] = THRESHOLD_EXCEEDED;
1561                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1562                         len = 8;
1563                 } else {
1564                         arr[0] = 0x70;
1565                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1566                         arr[7] = 0xa;           /* 18 byte sense buffer */
1567                         arr[12] = THRESHOLD_EXCEEDED;
1568                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1569                 }
1570         } else {
1571                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1572                 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1573                         ;       /* have sense and formats match */
1574                 else if (arr[0] <= 0x70) {
1575                         if (dsense) {
1576                                 memset(arr, 0, 8);
1577                                 arr[0] = 0x72;
1578                                 len = 8;
1579                         } else {
1580                                 memset(arr, 0, 18);
1581                                 arr[0] = 0x70;
1582                                 arr[7] = 0xa;
1583                         }
1584                 } else if (dsense) {
1585                         memset(arr, 0, 8);
1586                         arr[0] = 0x72;
1587                         arr[1] = sbuff[2];     /* sense key */
1588                         arr[2] = sbuff[12];    /* asc */
1589                         arr[3] = sbuff[13];    /* ascq */
1590                         len = 8;
1591                 } else {
1592                         memset(arr, 0, 18);
1593                         arr[0] = 0x70;
1594                         arr[2] = sbuff[1];     /* sense key */
1595                         arr[7] = 0xa;
1596                         arr[12] = sbuff[2];    /* asc */
1597                         arr[13] = sbuff[3];    /* ascq */
1598                 }
1599
1600         }
1601         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1602         return fill_from_dev_buffer(scp, arr, len);
1603 }
1604
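     /* START STOP UNIT: only POWER CONDITION==0 accepted; tracks stopped state */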
1605 static int resp_start_stop(struct scsi_cmnd *scp,
1606                            struct sdebug_dev_info *devip)
1607 {
1608         unsigned char *cmd = scp->cmnd;
1609         int power_cond, stop;
1610         bool changing;
1611
1612         power_cond = (cmd[4] & 0xf0) >> 4;
1613         if (power_cond) {
1614                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1615                 return check_condition_result;
1616         }
1617         stop = !(cmd[4] & 1);
1618         changing = atomic_read(&devip->stopped) == !stop;
1619         atomic_xchg(&devip->stopped, stop);
1620         if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1621                 return SDEG_RES_IMMED_MASK;
1622         else
1623                 return 0;
1624 }
1625
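     /* Capacity in sectors: virtual_gb (if set) scaled to the sector size, else the store size */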
1626 static sector_t get_sdebug_capacity(void)
1627 {
1628         static const unsigned int gibibyte = 1073741824;
1629
1630         if (sdebug_virtual_gb > 0)
1631                 return (sector_t)sdebug_virtual_gb *
1632                         (gibibyte / sdebug_sector_size);
1633         else
1634                 return sdebug_store_sectors;
1635 }
1636
1637 #define SDEBUG_READCAP_ARR_SZ 8
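     /* READ CAPACITY(10): last LBA (0xffffffff if it does not fit) and block length */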
1638 static int resp_readcap(struct scsi_cmnd *scp,
1639                         struct sdebug_dev_info *devip)
1640 {
1641         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1642         unsigned int capac;
1643
1644         /* following just in case virtual_gb changed */
1645         sdebug_capacity = get_sdebug_capacity();
1646         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1647         if (sdebug_capacity < 0xffffffff) {
1648                 capac = (unsigned int)sdebug_capacity - 1;
1649                 put_unaligned_be32(capac, arr + 0);
1650         } else
1651                 put_unaligned_be32(0xffffffff, arr + 0);
1652         put_unaligned_be16(sdebug_sector_size, arr + 6);
1653         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1654 }
1655
1656 #define SDEBUG_READCAP16_ARR_SZ 32
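     /* READ CAPACITY(16): adds protection type, physical block exponent, alignment and LBPME/LBPRZ */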
1657 static int resp_readcap16(struct scsi_cmnd *scp,
1658                           struct sdebug_dev_info *devip)
1659 {
1660         unsigned char *cmd = scp->cmnd;
1661         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1662         int alloc_len;
1663
1664         alloc_len = get_unaligned_be32(cmd + 10);
1665         /* following just in case virtual_gb changed */
1666         sdebug_capacity = get_sdebug_capacity();
1667         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1668         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1669         put_unaligned_be32(sdebug_sector_size, arr + 8);
1670         arr[13] = sdebug_physblk_exp & 0xf;
1671         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1672
1673         if (scsi_debug_lbp()) {
1674                 arr[14] |= 0x80; /* LBPME */
1675                 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1676                  * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1677                  * in the wider field maps to 0 in this field.
1678                  */
1679                 if (sdebug_lbprz & 1)   /* precisely what the draft requires */
1680                         arr[14] |= 0x40;
1681         }
1682
1683         arr[15] = sdebug_lowest_aligned & 0xff;
1684
1685         if (have_dif_prot) {
1686                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1687                 arr[12] |= 1; /* PROT_EN */
1688         }
1689
1690         return fill_from_dev_buffer(scp, arr,
1691                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1692 }
1693
1694 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1695
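     /* REPORT TARGET PORT GROUPS: two single-port groups; the second is reported unavailable */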
1696 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1697                               struct sdebug_dev_info *devip)
1698 {
1699         unsigned char *cmd = scp->cmnd;
1700         unsigned char *arr;
1701         int host_no = devip->sdbg_host->shost->host_no;
1702         int n, ret, alen, rlen;
1703         int port_group_a, port_group_b, port_a, port_b;
1704
1705         alen = get_unaligned_be32(cmd + 6);
1706         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1707         if (!arr)
1708                 return DID_REQUEUE << 16;
1709         /*
1710          * EVPD page 0x88 states we have two ports, one
1711          * real and a fake port with no device connected.
1712          * So we create two port groups with one port each
1713          * and set the group with port B to unavailable.
1714          */
1715         port_a = 0x1; /* relative port A */
1716         port_b = 0x2; /* relative port B */
1717         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1718                         (devip->channel & 0x7f);
1719         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1720                         (devip->channel & 0x7f) + 0x80;
1721
1722         /*
1723          * The asymmetric access state is cycled according to the host_id.
1724          */
1725         n = 4;
1726         if (sdebug_vpd_use_hostno == 0) {
1727                 arr[n++] = host_no % 3; /* Asymm access state */
1728                 arr[n++] = 0x0F; /* claim: all states are supported */
1729         } else {
1730                 arr[n++] = 0x0; /* Active/Optimized path */
1731                 arr[n++] = 0x01; /* only support active/optimized paths */
1732         }
1733         put_unaligned_be16(port_group_a, arr + n);
1734         n += 2;
1735         arr[n++] = 0;    /* Reserved */
1736         arr[n++] = 0;    /* Status code */
1737         arr[n++] = 0;    /* Vendor unique */
1738         arr[n++] = 0x1;  /* One port per group */
1739         arr[n++] = 0;    /* Reserved */
1740         arr[n++] = 0;    /* Reserved */
1741         put_unaligned_be16(port_a, arr + n);
1742         n += 2;
1743         arr[n++] = 3;    /* Port unavailable */
1744         arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1745         put_unaligned_be16(port_group_b, arr + n);
1746         n += 2;
1747         arr[n++] = 0;    /* Reserved */
1748         arr[n++] = 0;    /* Status code */
1749         arr[n++] = 0;    /* Vendor unique */
1750         arr[n++] = 0x1;  /* One port per group */
1751         arr[n++] = 0;    /* Reserved */
1752         arr[n++] = 0;    /* Reserved */
1753         put_unaligned_be16(port_b, arr + n);
1754         n += 2;
1755
1756         rlen = n - 4;
1757         put_unaligned_be32(rlen, arr + 0);
1758
1759         /*
1760          * Return the smallest value of either
1761          * - The allocated length
1762          * - The constructed command length
1763          * - The maximum array size
1764          */
1765         rlen = min(alen, n);
1766         ret = fill_from_dev_buffer(scp, arr,
1767                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1768         kfree(arr);
1769         return ret;
1770 }
1771
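     /*
      * REPORT SUPPORTED OPERATION CODES: walks opcode_info_arr and, depending
      * on the REPORTING OPTIONS field, returns either all commands or one
      * command's CDB usage map. RCTD adds a command timeouts descriptor.
      */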
1772 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1773                              struct sdebug_dev_info *devip)
1774 {
1775         bool rctd;
1776         u8 reporting_opts, req_opcode, sdeb_i, supp;
1777         u16 req_sa, u;
1778         u32 alloc_len, a_len;
1779         int k, offset, len, errsts, count, bump, na;
1780         const struct opcode_info_t *oip;
1781         const struct opcode_info_t *r_oip;
1782         u8 *arr;
1783         u8 *cmd = scp->cmnd;
1784
1785         rctd = !!(cmd[2] & 0x80);
1786         reporting_opts = cmd[2] & 0x7;
1787         req_opcode = cmd[3];
1788         req_sa = get_unaligned_be16(cmd + 4);
1789         alloc_len = get_unaligned_be32(cmd + 6);
1790         if (alloc_len < 4 || alloc_len > 0xffff) {
1791                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1792                 return check_condition_result;
1793         }
1794         if (alloc_len > 8192)
1795                 a_len = 8192;
1796         else
1797                 a_len = alloc_len;
1798         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1799         if (NULL == arr) {
1800                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1801                                 INSUFF_RES_ASCQ);
1802                 return check_condition_result;
1803         }
1804         switch (reporting_opts) {
1805         case 0: /* all commands */
1806                 /* count number of commands */
1807                 for (count = 0, oip = opcode_info_arr;
1808                      oip->num_attached != 0xff; ++oip) {
1809                         if (F_INV_OP & oip->flags)
1810                                 continue;
1811                         count += (oip->num_attached + 1);
1812                 }
1813                 bump = rctd ? 20 : 8;
1814                 put_unaligned_be32(count * bump, arr);
1815                 for (offset = 4, oip = opcode_info_arr;
1816                      oip->num_attached != 0xff && offset < a_len; ++oip) {
1817                         if (F_INV_OP & oip->flags)
1818                                 continue;
1819                         na = oip->num_attached;
1820                         arr[offset] = oip->opcode;
1821                         put_unaligned_be16(oip->sa, arr + offset + 2);
1822                         if (rctd)
1823                                 arr[offset + 5] |= 0x2;
1824                         if (FF_SA & oip->flags)
1825                                 arr[offset + 5] |= 0x1;
1826                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1827                         if (rctd)
1828                                 put_unaligned_be16(0xa, arr + offset + 8);
1829                         r_oip = oip;
1830                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1831                                 if (F_INV_OP & oip->flags)
1832                                         continue;
1833                                 offset += bump;
1834                                 arr[offset] = oip->opcode;
1835                                 put_unaligned_be16(oip->sa, arr + offset + 2);
1836                                 if (rctd)
1837                                         arr[offset + 5] |= 0x2;
1838                                 if (FF_SA & oip->flags)
1839                                         arr[offset + 5] |= 0x1;
1840                                 put_unaligned_be16(oip->len_mask[0],
1841                                                    arr + offset + 6);
1842                                 if (rctd)
1843                                         put_unaligned_be16(0xa,
1844                                                            arr + offset + 8);
1845                         }
1846                         oip = r_oip;
1847                         offset += bump;
1848                 }
1849                 break;
1850         case 1: /* one command: opcode only */
1851         case 2: /* one command: opcode plus service action */
1852         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1853                 sdeb_i = opcode_ind_arr[req_opcode];
1854                 oip = &opcode_info_arr[sdeb_i];
1855                 if (F_INV_OP & oip->flags) {
1856                         supp = 1;
1857                         offset = 4;
1858                 } else {
1859                         if (1 == reporting_opts) {
1860                                 if (FF_SA & oip->flags) {
1861                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1862                                                              2, 2);
1863                                         kfree(arr);
1864                                         return check_condition_result;
1865                                 }
1866                                 req_sa = 0;
1867                         } else if (2 == reporting_opts &&
1868                                    0 == (FF_SA & oip->flags)) {
1869                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1870                                 kfree(arr);     /* point at requested sa */
1871                                 return check_condition_result;
1872                         }
1873                         if (0 == (FF_SA & oip->flags) &&
1874                             req_opcode == oip->opcode)
1875                                 supp = 3;
1876                         else if (0 == (FF_SA & oip->flags)) {
1877                                 na = oip->num_attached;
1878                                 for (k = 0, oip = oip->arrp; k < na;
1879                                      ++k, ++oip) {
1880                                         if (req_opcode == oip->opcode)
1881                                                 break;
1882                                 }
1883                                 supp = (k >= na) ? 1 : 3;
1884                         } else if (req_sa != oip->sa) {
1885                                 na = oip->num_attached;
1886                                 for (k = 0, oip = oip->arrp; k < na;
1887                                      ++k, ++oip) {
1888                                         if (req_sa == oip->sa)
1889                                                 break;
1890                                 }
1891                                 supp = (k >= na) ? 1 : 3;
1892                         } else
1893                                 supp = 3;
1894                         if (3 == supp) {
1895                                 u = oip->len_mask[0];
1896                                 put_unaligned_be16(u, arr + 2);
1897                                 arr[4] = oip->opcode;
1898                                 for (k = 1; k < u; ++k)
1899                                         arr[4 + k] = (k < 16) ?
1900                                                  oip->len_mask[k] : 0xff;
1901                                 offset = 4 + u;
1902                         } else
1903                                 offset = 4;
1904                 }
1905                 arr[1] = (rctd ? 0x80 : 0) | supp;
1906                 if (rctd) {
1907                         put_unaligned_be16(0xa, arr + offset);
1908                         offset += 12;
1909                 }
1910                 break;
1911         default:
1912                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1913                 kfree(arr);
1914                 return check_condition_result;
1915         }
1916         offset = (offset < a_len) ? offset : a_len;
1917         len = (offset < alloc_len) ? offset : alloc_len;
1918         errsts = fill_from_dev_buffer(scp, arr, len);
1919         kfree(arr);
1920         return errsts;
1921 }
1922
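     /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS: claims ATS, ATSS, LURS and ITNRS */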
1923 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1924                           struct sdebug_dev_info *devip)
1925 {
1926         bool repd;
1927         u32 alloc_len, len;
1928         u8 arr[16];
1929         u8 *cmd = scp->cmnd;
1930
1931         memset(arr, 0, sizeof(arr));
1932         repd = !!(cmd[2] & 0x80);
1933         alloc_len = get_unaligned_be32(cmd + 6);
1934         if (alloc_len < 4) {
1935                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1936                 return check_condition_result;
1937         }
1938         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1939         arr[1] = 0x1;           /* ITNRS */
1940         if (repd) {
1941                 arr[3] = 0xc;
1942                 len = 16;
1943         } else
1944                 len = 4;
1945
1946         len = (len < alloc_len) ? len : alloc_len;
1947         return fill_from_dev_buffer(scp, arr, len);
1948 }
1949
1950 /* <<Following mode page info copied from ST318451LW>> */
1951
1952 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
1953 {       /* Read-Write Error Recovery page for mode_sense */
1954         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1955                                         5, 0, 0xff, 0xff};
1956
1957         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1958         if (1 == pcontrol)
1959                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1960         return sizeof(err_recov_pg);
1961 }
1962
1963 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
1964 {       /* Disconnect-Reconnect page for mode_sense */
1965         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1966                                          0, 0, 0, 0, 0, 0, 0, 0};
1967
1968         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1969         if (1 == pcontrol)
1970                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1971         return sizeof(disconnect_pg);
1972 }
1973
1974 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1975 {       /* Format device page for mode_sense */
1976         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1977                                      0, 0, 0, 0, 0, 0, 0, 0,
1978                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1979
1980         memcpy(p, format_pg, sizeof(format_pg));
1981         put_unaligned_be16(sdebug_sectors_per, p + 10);
1982         put_unaligned_be16(sdebug_sector_size, p + 12);
1983         if (sdebug_removable)
1984                 p[20] |= 0x20; /* should agree with INQUIRY */
1985         if (1 == pcontrol)
1986                 memset(p + 2, 0, sizeof(format_pg) - 2);
1987         return sizeof(format_pg);
1988 }
1989
1990 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1991                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1992                                      0, 0, 0, 0};
1993
1994 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
1995 {       /* Caching page for mode_sense */
1996         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1997                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1998         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1999                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2000
2001         if (SDEBUG_OPT_N_WCE & sdebug_opts)
2002                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
2003         memcpy(p, caching_pg, sizeof(caching_pg));
2004         if (1 == pcontrol)
2005                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2006         else if (2 == pcontrol)
2007                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2008         return sizeof(caching_pg);
2009 }
2010
2011 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2012                                     0, 0, 0x2, 0x4b};
2013
2014 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2015 {       /* Control mode page for mode_sense */
2016         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2017                                         0, 0, 0, 0};
2018         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2019                                      0, 0, 0x2, 0x4b};
2020
2021         if (sdebug_dsense)
2022                 ctrl_m_pg[2] |= 0x4;
2023         else
2024                 ctrl_m_pg[2] &= ~0x4;
2025
2026         if (sdebug_ato)
2027                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2028
2029         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2030         if (1 == pcontrol)
2031                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2032         else if (2 == pcontrol)
2033                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2034         return sizeof(ctrl_m_pg);
2035 }
2036
2037
2038 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2039 {       /* Informational Exceptions control mode page for mode_sense */
2040         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2041                                        0, 0, 0x0, 0x0};
2042         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2043                                       0, 0, 0x0, 0x0};
2044
2045         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2046         if (1 == pcontrol)
2047                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2048         else if (2 == pcontrol)
2049                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2050         return sizeof(iec_m_pg);
2051 }
2052
2053 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2054 {       /* SAS SSP mode page - short format for mode_sense */
2055         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2056                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2057
2058         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2059         if (1 == pcontrol)
2060                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2061         return sizeof(sas_sf_m_pg);
2062 }
2063
2064
2065 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2066                               int target_dev_id)
2067 {       /* SAS phy control and discover mode page for mode_sense */
2068         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2069                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2070                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2071                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2072                     0x2, 0, 0, 0, 0, 0, 0, 0,
2073                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2074                     0, 0, 0, 0, 0, 0, 0, 0,
2075                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2076                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2077                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2078                     0x3, 0, 0, 0, 0, 0, 0, 0,
2079                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2080                     0, 0, 0, 0, 0, 0, 0, 0,
2081                 };
2082         int port_a, port_b;
2083
2084         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2085         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2086         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2087         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2088         port_a = target_dev_id + 1;
2089         port_b = port_a + 1;
2090         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2091         put_unaligned_be32(port_a, p + 20);
2092         put_unaligned_be32(port_b, p + 48 + 20);
2093         if (1 == pcontrol)
2094                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2095         return sizeof(sas_pcd_m_pg);
2096 }
2097
2098 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2099 {       /* SAS SSP shared protocol specific port mode subpage */
2100         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2101                     0, 0, 0, 0, 0, 0, 0, 0,
2102                 };
2103
2104         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2105         if (1 == pcontrol)
2106                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2107         return sizeof(sas_sha_m_pg);
2108 }
2109
2110 #define SDEBUG_MAX_MSENSE_SZ 256
2111
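     /*
      * MODE SENSE(6/10): builds the mode parameter header, an optional block
      * descriptor, then the requested page(s) via the resp_*_pg() helpers.
      */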
2112 static int resp_mode_sense(struct scsi_cmnd *scp,
2113                            struct sdebug_dev_info *devip)
2114 {
2115         int pcontrol, pcode, subpcode, bd_len;
2116         unsigned char dev_spec;
2117         int alloc_len, offset, len, target_dev_id;
2118         int target = scp->device->id;
2119         unsigned char *ap;
2120         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2121         unsigned char *cmd = scp->cmnd;
2122         bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2123
2124         dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
2125         pcontrol = (cmd[2] & 0xc0) >> 6;
2126         pcode = cmd[2] & 0x3f;
2127         subpcode = cmd[3];
2128         msense_6 = (MODE_SENSE == cmd[0]);
2129         llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2130         is_disk = (sdebug_ptype == TYPE_DISK);
2131         if (is_disk && !dbd)
2132                 bd_len = llbaa ? 16 : 8;
2133         else
2134                 bd_len = 0;
2135         alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2136         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2137         if (0x3 == pcontrol) {  /* Saving values not supported */
2138                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2139                 return check_condition_result;
2140         }
2141         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2142                         (devip->target * 1000) - 3;
2143         /* for disks set DPOFUA bit and clear write protect (WP) bit */
2144         if (is_disk) {
2145                 dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
2146                 if (sdebug_wp)
2147                         dev_spec |= 0x80;
2148         } else
2149                 dev_spec = 0x0;
2150         if (msense_6) {
2151                 arr[2] = dev_spec;
2152                 arr[3] = bd_len;
2153                 offset = 4;
2154         } else {
2155                 arr[3] = dev_spec;
2156                 if (16 == bd_len)
2157                         arr[4] = 0x1;   /* set LONGLBA bit */
2158                 arr[7] = bd_len;        /* assume 255 or less */
2159                 offset = 8;
2160         }
2161         ap = arr + offset;
2162         if ((bd_len > 0) && (!sdebug_capacity))
2163                 sdebug_capacity = get_sdebug_capacity();
2164
2165         if (8 == bd_len) {
2166                 if (sdebug_capacity > 0xfffffffe)
2167                         put_unaligned_be32(0xffffffff, ap + 0);
2168                 else
2169                         put_unaligned_be32(sdebug_capacity, ap + 0);
2170                 put_unaligned_be16(sdebug_sector_size, ap + 6);
2171                 offset += bd_len;
2172                 ap = arr + offset;
2173         } else if (16 == bd_len) {
2174                 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2175                 put_unaligned_be32(sdebug_sector_size, ap + 12);
2176                 offset += bd_len;
2177                 ap = arr + offset;
2178         }
2179
2180         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2181                 /* TODO: Control Extension page */
2182                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2183                 return check_condition_result;
2184         }
2185         bad_pcode = false;
2186
2187         switch (pcode) {
2188         case 0x1:       /* Read-Write error recovery page, direct access */
2189                 len = resp_err_recov_pg(ap, pcontrol, target);
2190                 offset += len;
2191                 break;
2192         case 0x2:       /* Disconnect-Reconnect page, all devices */
2193                 len = resp_disconnect_pg(ap, pcontrol, target);
2194                 offset += len;
2195                 break;
2196         case 0x3:       /* Format device page, direct access */
2197                 if (is_disk) {
2198                         len = resp_format_pg(ap, pcontrol, target);
2199                         offset += len;
2200                 } else
2201                         bad_pcode = true;
2202                 break;
2203         case 0x8:       /* Caching page, direct access */
2204                 if (is_disk) {
2205                         len = resp_caching_pg(ap, pcontrol, target);
2206                         offset += len;
2207                 } else
2208                         bad_pcode = true;
2209                 break;
2210         case 0xa:       /* Control Mode page, all devices */
2211                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2212                 offset += len;
2213                 break;
2214         case 0x19:      /* if spc==1 then sas phy, control+discover */
2215                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2216                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2217                         return check_condition_result;
2218                 }
2219                 len = 0;
2220                 if ((0x0 == subpcode) || (0xff == subpcode))
2221                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2222                 if ((0x1 == subpcode) || (0xff == subpcode))
2223                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2224                                                   target_dev_id);
2225                 if ((0x2 == subpcode) || (0xff == subpcode))
2226                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2227                 offset += len;
2228                 break;
2229         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2230                 len = resp_iec_m_pg(ap, pcontrol, target);
2231                 offset += len;
2232                 break;
2233         case 0x3f:      /* Read all Mode pages */
2234                 if ((0 == subpcode) || (0xff == subpcode)) {
2235                         len = resp_err_recov_pg(ap, pcontrol, target);
2236                         len += resp_disconnect_pg(ap + len, pcontrol, target);
2237                         if (is_disk) {
2238                                 len += resp_format_pg(ap + len, pcontrol,
2239                                                       target);
2240                                 len += resp_caching_pg(ap + len, pcontrol,
2241                                                        target);
2242                         }
2243                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2244                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2245                         if (0xff == subpcode) {
2246                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2247                                                   target, target_dev_id);
2248                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2249                         }
2250                         len += resp_iec_m_pg(ap + len, pcontrol, target);
2251                         offset += len;
2252                 } else {
2253                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2254                         return check_condition_result;
2255                 }
2256                 break;
2257         default:
2258                 bad_pcode = true;
2259                 break;
2260         }
2261         if (bad_pcode) {
2262                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2263                 return check_condition_result;
2264         }
2265         if (msense_6)
2266                 arr[0] = offset - 1;
2267         else
2268                 put_unaligned_be16((offset - 2), arr + 0);
2269         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2270 }
2271
2272 #define SDEBUG_MAX_MSELECT_SZ 512
2273
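     /*
      * MODE SELECT(6/10): only the Caching, Control and Informational
      * Exceptions pages are changeable; a successful change sets the
      * MODE PARAMETERS CHANGED unit attention.
      */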
2274 static int resp_mode_select(struct scsi_cmnd *scp,
2275                             struct sdebug_dev_info *devip)
2276 {
2277         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2278         int param_len, res, mpage;
2279         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2280         unsigned char *cmd = scp->cmnd;
2281         int mselect6 = (MODE_SELECT == cmd[0]);
2282
2283         memset(arr, 0, sizeof(arr));
2284         pf = cmd[1] & 0x10;
2285         sp = cmd[1] & 0x1;
2286         param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2287         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2288                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2289                 return check_condition_result;
2290         }
2291         res = fetch_to_dev_buffer(scp, arr, param_len);
2292         if (-1 == res)
2293                 return DID_ERROR << 16;
2294         else if (sdebug_verbose && (res < param_len))
2295                 sdev_printk(KERN_INFO, scp->device,
2296                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
2297                             __func__, param_len, res);
2298         md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2299         bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2300         if (md_len > 2) {
2301                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2302                 return check_condition_result;
2303         }
2304         off = bd_len + (mselect6 ? 4 : 8);
2305         mpage = arr[off] & 0x3f;
2306         ps = !!(arr[off] & 0x80);
2307         if (ps) {
2308                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2309                 return check_condition_result;
2310         }
2311         spf = !!(arr[off] & 0x40);
2312         pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2313                        (arr[off + 1] + 2);
2314         if ((pg_len + off) > param_len) {
2315                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2316                                 PARAMETER_LIST_LENGTH_ERR, 0);
2317                 return check_condition_result;
2318         }
2319         switch (mpage) {
2320         case 0x8:      /* Caching Mode page */
2321                 if (caching_pg[1] == arr[off + 1]) {
2322                         memcpy(caching_pg + 2, arr + off + 2,
2323                                sizeof(caching_pg) - 2);
2324                         goto set_mode_changed_ua;
2325                 }
2326                 break;
2327         case 0xa:      /* Control Mode page */
2328                 if (ctrl_m_pg[1] == arr[off + 1]) {
2329                         memcpy(ctrl_m_pg + 2, arr + off + 2,
2330                                sizeof(ctrl_m_pg) - 2);
2331                         if (ctrl_m_pg[4] & 0x8)
2332                                 sdebug_wp = true;
2333                         else
2334                                 sdebug_wp = false;
2335                         sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2336                         goto set_mode_changed_ua;
2337                 }
2338                 break;
2339         case 0x1c:      /* Informational Exceptions Mode page */
2340                 if (iec_m_pg[1] == arr[off + 1]) {
2341                         memcpy(iec_m_pg + 2, arr + off + 2,
2342                                sizeof(iec_m_pg) - 2);
2343                         goto set_mode_changed_ua;
2344                 }
2345                 break;
2346         default:
2347                 break;
2348         }
2349         mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2350         return check_condition_result;
2351 set_mode_changed_ua:
2352         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2353         return 0;
2354 }
2355
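     /* Temperature log page parameters: current (38 C) and reference (65 C) */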
2356 static int resp_temp_l_pg(unsigned char *arr)
2357 {
2358         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2359                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
2360                 };
2361
2362         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2363         return sizeof(temp_l_pg);
2364 }
2365
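     /* Informational Exceptions log page; reflects the IE mode page TEST bit */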
2366 static int resp_ie_l_pg(unsigned char *arr)
2367 {
2368         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2369                 };
2370
2371         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2372         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2373                 arr[4] = THRESHOLD_EXCEEDED;
2374                 arr[5] = 0xff;
2375         }
2376         return sizeof(ie_l_pg);
2377 }
2378
2379 #define SDEBUG_MAX_LSENSE_SZ 512
2380
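     /* LOG SENSE: supports the Temperature (0xd) and Informational Exceptions (0x2f) pages */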
2381 static int resp_log_sense(struct scsi_cmnd *scp,
2382                           struct sdebug_dev_info *devip)
2383 {
2384         int ppc, sp, pcode, subpcode, alloc_len, len, n;
2385         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2386         unsigned char *cmd = scp->cmnd;
2387
2388         memset(arr, 0, sizeof(arr));
2389         ppc = cmd[1] & 0x2;
2390         sp = cmd[1] & 0x1;
2391         if (ppc || sp) {
2392                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2393                 return check_condition_result;
2394         }
2395         pcode = cmd[2] & 0x3f;
2396         subpcode = cmd[3] & 0xff;
2397         alloc_len = get_unaligned_be16(cmd + 7);
2398         arr[0] = pcode;
2399         if (0 == subpcode) {
2400                 switch (pcode) {
2401                 case 0x0:       /* Supported log pages log page */
2402                         n = 4;
2403                         arr[n++] = 0x0;         /* this page */
2404                         arr[n++] = 0xd;         /* Temperature */
2405                         arr[n++] = 0x2f;        /* Informational exceptions */
2406                         arr[3] = n - 4;
2407                         break;
2408                 case 0xd:       /* Temperature log page */
2409                         arr[3] = resp_temp_l_pg(arr + 4);
2410                         break;
2411                 case 0x2f:      /* Informational exceptions log page */
2412                         arr[3] = resp_ie_l_pg(arr + 4);
2413                         break;
2414                 default:
2415                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2416                         return check_condition_result;
2417                 }
2418         } else if (0xff == subpcode) {
2419                 arr[0] |= 0x40;
2420                 arr[1] = subpcode;
2421                 switch (pcode) {
2422                 case 0x0:       /* Supported log pages and subpages log page */
2423                         n = 4;
2424                         arr[n++] = 0x0;
2425                         arr[n++] = 0x0;         /* 0,0 page */
2426                         arr[n++] = 0x0;
2427                         arr[n++] = 0xff;        /* this page */
2428                         arr[n++] = 0xd;
2429                         arr[n++] = 0x0;         /* Temperature */
2430                         arr[n++] = 0x2f;
2431                         arr[n++] = 0x0; /* Informational exceptions */
2432                         arr[3] = n - 4;
2433                         break;
2434                 case 0xd:       /* Temperature subpages */
2435                         n = 4;
2436                         arr[n++] = 0xd;
2437                         arr[n++] = 0x0;         /* Temperature */
2438                         arr[3] = n - 4;
2439                         break;
2440                 case 0x2f:      /* Informational exceptions subpages */
2441                         n = 4;
2442                         arr[n++] = 0x2f;
2443                         arr[n++] = 0x0;         /* Informational exceptions */
2444                         arr[3] = n - 4;
2445                         break;
2446                 default:
2447                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2448                         return check_condition_result;
2449                 }
2450         } else {
2451                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2452                 return check_condition_result;
2453         }
2454         len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2455         return fill_from_dev_buffer(scp, arr,
2456                     min(len, SDEBUG_MAX_LSENSE_SZ));
2457 }
2458
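     /* Common LBA range, transfer length and write protect checks for media access commands */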
2459 static inline int check_device_access_params(struct scsi_cmnd *scp,
2460         unsigned long long lba, unsigned int num, bool write)
2461 {
2462         if (lba + num > sdebug_capacity) {
2463                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2464                 return check_condition_result;
2465         }
2466         /* transfer length excessive (tie in to block limits VPD page) */
2467         if (num > sdebug_store_sectors) {
2468                 /* needs work to find which cdb byte 'num' comes from */
2469                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2470                 return check_condition_result;
2471         }
2472         if (write && unlikely(sdebug_wp)) {
2473                 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2474                 return check_condition_result;
2475         }
2476         return 0;
2477 }
2478
2479 /* Returns number of bytes copied or -1 if error. */
2480 static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2481                             u32 num, bool do_write)
2482 {
2483         int ret;
2484         u64 block, rest = 0;
2485         struct scsi_data_buffer *sdb = &scmd->sdb;
2486         enum dma_data_direction dir;
2487
2488         if (do_write) {
2489                 dir = DMA_TO_DEVICE;
2490                 write_since_sync = true;
2491         } else {
2492                 dir = DMA_FROM_DEVICE;
2493         }
2494
2495         if (!sdb->length)
2496                 return 0;
2497         if (scmd->sc_data_direction != dir)
2498                 return -1;
2499
2500         block = do_div(lba, sdebug_store_sectors);
2501         if (block + num > sdebug_store_sectors)
2502                 rest = block + num - sdebug_store_sectors;
2503
2504         ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2505                    fake_storep + (block * sdebug_sector_size),
2506                    (num - rest) * sdebug_sector_size, sg_skip, do_write);
2507         if (ret != (num - rest) * sdebug_sector_size)
2508                 return ret;
2509
2510         if (rest) {
2511                 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2512                             fake_storep, rest * sdebug_sector_size,
2513                             sg_skip + ((num - rest) * sdebug_sector_size),
2514                             do_write);
2515         }
2516
2517         return ret;
2518 }
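
/*
 * Illustrative example (not part of the original driver): with
 * sdebug_store_sectors == 0x8000, a READ of num == 4 blocks at lba == 0x7ffe
 * leaves block == 0x7ffe and rest == 2 above.  The first sg_copy_buffer()
 * call then moves the last two sectors of fake_storep and the second call
 * moves the remaining two sectors from its start, i.e. the simulated store
 * is accessed as a ring buffer.
 */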
2519
2520 /* If lba2fake_store(lba,num) compares equal to the first half of arr, then
2521  * copy the second (write) half of arr into lba2fake_store(lba,num) and return
2522  * true. If the comparison fails then return false. */
2523 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2524 {
2525         bool res;
2526         u64 block, rest = 0;
2527         u32 store_blks = sdebug_store_sectors;
2528         u32 lb_size = sdebug_sector_size;
2529
2530         block = do_div(lba, store_blks);
2531         if (block + num > store_blks)
2532                 rest = block + num - store_blks;
2533
2534         res = !memcmp(fake_storep + (block * lb_size), arr,
2535                       (num - rest) * lb_size);
2536         if (!res)
2537                 return res;
2538         if (rest)
2539                 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2540                               rest * lb_size);
2541         if (!res)
2542                 return res;
2543         arr += num * lb_size;
2544         memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2545         if (rest)
2546                 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2547                        rest * lb_size);
2548         return res;
2549 }
2550
2551 static __be16 dif_compute_csum(const void *buf, int len)
2552 {
2553         __be16 csum;
2554
2555         if (sdebug_guard)
2556                 csum = (__force __be16)ip_compute_csum(buf, len);
2557         else
2558                 csum = cpu_to_be16(crc_t10dif(buf, len));
2559
2560         return csum;
2561 }
2562
2563 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2564                       sector_t sector, u32 ei_lba)
2565 {
2566         __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2567
2568         if (sdt->guard_tag != csum) {
2569                 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2570                         (unsigned long)sector,
2571                         be16_to_cpu(sdt->guard_tag),
2572                         be16_to_cpu(csum));
2573                 return 0x01;
2574         }
2575         if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2576             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2577                 pr_err("REF check failed on sector %lu\n",
2578                         (unsigned long)sector);
2579                 return 0x03;
2580         }
2581         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2582             be32_to_cpu(sdt->ref_tag) != ei_lba) {
2583                 pr_err("REF check failed on sector %lu\n",
2584                         (unsigned long)sector);
2585                 return 0x03;
2586         }
2587         return 0;
2588 }
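
/*
 * Note (added commentary): the non-zero values returned above are used by the
 * callers as the ASCQ paired with ASC 0x10 when building sense data, e.g.
 * mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, ret), so 0x01 reports a logical
 * block guard check failure and 0x03 a reference tag check failure.
 */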
2589
2590 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2591                           unsigned int sectors, bool read)
2592 {
2593         size_t resid;
2594         void *paddr;
2595         const void *dif_store_end = dif_storep + sdebug_store_sectors;
2596         struct sg_mapping_iter miter;
2597
2598         /* Bytes of protection data to copy into sgl */
2599         resid = sectors * sizeof(*dif_storep);
2600
2601         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2602                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2603                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2604
2605         while (sg_miter_next(&miter) && resid > 0) {
2606                 size_t len = min(miter.length, resid);
2607                 void *start = dif_store(sector);
2608                 size_t rest = 0;
2609
2610                 if (dif_store_end < start + len)
2611                         rest = start + len - dif_store_end;
2612
2613                 paddr = miter.addr;
2614
2615                 if (read)
2616                         memcpy(paddr, start, len - rest);
2617                 else
2618                         memcpy(start, paddr, len - rest);
2619
2620                 if (rest) {
2621                         if (read)
2622                                 memcpy(paddr + len - rest, dif_storep, rest);
2623                         else
2624                                 memcpy(dif_storep, paddr + len - rest, rest);
2625                 }
2626
2627                 sector += len / sizeof(*dif_storep);
2628                 resid -= len;
2629         }
2630         sg_miter_stop(&miter);
2631 }
2632
2633 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2634                             unsigned int sectors, u32 ei_lba)
2635 {
2636         unsigned int i;
2637         struct t10_pi_tuple *sdt;
2638         sector_t sector;
2639
2640         for (i = 0; i < sectors; i++, ei_lba++) {
2641                 int ret;
2642
2643                 sector = start_sec + i;
2644                 sdt = dif_store(sector);
2645
2646                 if (sdt->app_tag == cpu_to_be16(0xffff))
2647                         continue;
2648
2649                 ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
2650                 if (ret) {
2651                         dif_errors++;
2652                         return ret;
2653                 }
2654         }
2655
2656         dif_copy_prot(SCpnt, start_sec, sectors, true);
2657         dix_reads++;
2658
2659         return 0;
2660 }
2661
2662 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2663 {
2664         u8 *cmd = scp->cmnd;
2665         struct sdebug_queued_cmd *sqcp;
2666         u64 lba;
2667         u32 num;
2668         u32 ei_lba;
2669         unsigned long iflags;
2670         int ret;
2671         bool check_prot;
2672
2673         switch (cmd[0]) {
2674         case READ_16:
2675                 ei_lba = 0;
2676                 lba = get_unaligned_be64(cmd + 2);
2677                 num = get_unaligned_be32(cmd + 10);
2678                 check_prot = true;
2679                 break;
2680         case READ_10:
2681                 ei_lba = 0;
2682                 lba = get_unaligned_be32(cmd + 2);
2683                 num = get_unaligned_be16(cmd + 7);
2684                 check_prot = true;
2685                 break;
2686         case READ_6:
2687                 ei_lba = 0;
2688                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2689                       (u32)(cmd[1] & 0x1f) << 16;
2690                 num = (0 == cmd[4]) ? 256 : cmd[4];
2691                 check_prot = true;
2692                 break;
2693         case READ_12:
2694                 ei_lba = 0;
2695                 lba = get_unaligned_be32(cmd + 2);
2696                 num = get_unaligned_be32(cmd + 6);
2697                 check_prot = true;
2698                 break;
2699         case XDWRITEREAD_10:
2700                 ei_lba = 0;
2701                 lba = get_unaligned_be32(cmd + 2);
2702                 num = get_unaligned_be16(cmd + 7);
2703                 check_prot = false;
2704                 break;
2705         default:        /* assume READ(32) */
2706                 lba = get_unaligned_be64(cmd + 12);
2707                 ei_lba = get_unaligned_be32(cmd + 20);
2708                 num = get_unaligned_be32(cmd + 28);
2709                 check_prot = false;
2710                 break;
2711         }
2712         if (unlikely(have_dif_prot && check_prot)) {
2713                 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2714                     (cmd[1] & 0xe0)) {
2715                         mk_sense_invalid_opcode(scp);
2716                         return check_condition_result;
2717                 }
2718                 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2719                      sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2720                     (cmd[1] & 0xe0) == 0)
2721                         sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2722                                     "to DIF device\n");
2723         }
2724         if (unlikely(sdebug_any_injecting_opt)) {
2725                 sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2726
2727                 if (sqcp) {
2728                         if (sqcp->inj_short)
2729                                 num /= 2;
2730                 }
2731         } else
2732                 sqcp = NULL;
2733
2734         ret = check_device_access_params(scp, lba, num, false);
2735         if (ret)
2736                 return ret;
2737         if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2738                      (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
2739                      ((lba + num) > sdebug_medium_error_start))) {
2740                 /* claim unrecoverable read error */
2741                 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2742                 /* set info field and valid bit for fixed descriptor */
2743                 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2744                         scp->sense_buffer[0] |= 0x80;   /* Valid bit */
2745                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2746                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2747                         put_unaligned_be32(ret, scp->sense_buffer + 3);
2748                 }
2749                 scsi_set_resid(scp, scsi_bufflen(scp));
2750                 return check_condition_result;
2751         }
2752
2753         read_lock_irqsave(&atomic_rw, iflags);
2754
2755         /* DIX + T10 DIF */
2756         if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2757                 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2758
2759                 if (prot_ret) {
2760                         read_unlock_irqrestore(&atomic_rw, iflags);
2761                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2762                         return illegal_condition_result;
2763                 }
2764         }
2765
2766         ret = do_device_access(scp, 0, lba, num, false);
2767         read_unlock_irqrestore(&atomic_rw, iflags);
2768         if (unlikely(ret == -1))
2769                 return DID_ERROR << 16;
2770
2771         scsi_set_resid(scp, scsi_bufflen(scp) - ret);
2772
2773         if (unlikely(sqcp)) {
2774                 if (sqcp->inj_recovered) {
2775                         mk_sense_buffer(scp, RECOVERED_ERROR,
2776                                         THRESHOLD_EXCEEDED, 0);
2777                         return check_condition_result;
2778                 } else if (sqcp->inj_transport) {
2779                         mk_sense_buffer(scp, ABORTED_COMMAND,
2780                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
2781                         return check_condition_result;
2782                 } else if (sqcp->inj_dif) {
2783                         /* Logical block guard check failed */
2784                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2785                         return illegal_condition_result;
2786                 } else if (sqcp->inj_dix) {
2787                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2788                         return illegal_condition_result;
2789                 }
2790         }
2791         return 0;
2792 }
2793
2794 static void dump_sector(unsigned char *buf, int len)
2795 {
2796         int i, j, n;
2797
2798         pr_err(">>> Sector Dump <<<\n");
2799         for (i = 0 ; i < len ; i += 16) {
2800                 char b[128];
2801
2802                 for (j = 0, n = 0; j < 16; j++) {
2803                         unsigned char c = buf[i+j];
2804
2805                         if (c >= 0x20 && c < 0x7e)
2806                                 n += scnprintf(b + n, sizeof(b) - n,
2807                                                " %c ", buf[i+j]);
2808                         else
2809                                 n += scnprintf(b + n, sizeof(b) - n,
2810                                                "%02x ", buf[i+j]);
2811                 }
2812                 pr_err("%04d: %s\n", i, b);
2813         }
2814 }
2815
2816 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2817                              unsigned int sectors, u32 ei_lba)
2818 {
2819         int ret;
2820         struct t10_pi_tuple *sdt;
2821         void *daddr;
2822         sector_t sector = start_sec;
2823         int ppage_offset;
2824         int dpage_offset;
2825         struct sg_mapping_iter diter;
2826         struct sg_mapping_iter piter;
2827
2828         BUG_ON(scsi_sg_count(SCpnt) == 0);
2829         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2830
2831         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2832                         scsi_prot_sg_count(SCpnt),
2833                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2834         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2835                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2836
2837         /* For each protection page */
2838         while (sg_miter_next(&piter)) {
2839                 dpage_offset = 0;
2840                 if (WARN_ON(!sg_miter_next(&diter))) {
2841                         ret = 0x01;
2842                         goto out;
2843                 }
2844
2845                 for (ppage_offset = 0; ppage_offset < piter.length;
2846                      ppage_offset += sizeof(struct t10_pi_tuple)) {
2847                         /* If we're at the end of the current
2848                          * data page advance to the next one
2849                          */
2850                         if (dpage_offset >= diter.length) {
2851                                 if (WARN_ON(!sg_miter_next(&diter))) {
2852                                         ret = 0x01;
2853                                         goto out;
2854                                 }
2855                                 dpage_offset = 0;
2856                         }
2857
2858                         sdt = piter.addr + ppage_offset;
2859                         daddr = diter.addr + dpage_offset;
2860
2861                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2862                         if (ret) {
2863                                 dump_sector(daddr, sdebug_sector_size);
2864                                 goto out;
2865                         }
2866
2867                         sector++;
2868                         ei_lba++;
2869                         dpage_offset += sdebug_sector_size;
2870                 }
2871                 diter.consumed = dpage_offset;
2872                 sg_miter_stop(&diter);
2873         }
2874         sg_miter_stop(&piter);
2875
2876         dif_copy_prot(SCpnt, start_sec, sectors, false);
2877         dix_writes++;
2878
2879         return 0;
2880
2881 out:
2882         dif_errors++;
2883         sg_miter_stop(&diter);
2884         sg_miter_stop(&piter);
2885         return ret;
2886 }
2887
2888 static unsigned long lba_to_map_index(sector_t lba)
2889 {
2890         if (sdebug_unmap_alignment)
2891                 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2892         sector_div(lba, sdebug_unmap_granularity);
2893         return lba;
2894 }
2895
2896 static sector_t map_index_to_lba(unsigned long index)
2897 {
2898         sector_t lba = index * sdebug_unmap_granularity;
2899
2900         if (sdebug_unmap_alignment)
2901                 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2902         return lba;
2903 }
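
/*
 * Worked example (added, assuming sdebug_unmap_granularity == 8 and
 * sdebug_unmap_alignment == 4): lba_to_map_index() maps LBAs 4..11 to bit 1
 * and LBAs 12..19 to bit 2, while map_index_to_lba(1) == 4 and
 * map_index_to_lba(2) == 12; bit 0 covers the shorter leading run 0..3.
 */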
2904
2905 static unsigned int map_state(sector_t lba, unsigned int *num)
2906 {
2907         sector_t end;
2908         unsigned int mapped;
2909         unsigned long index;
2910         unsigned long next;
2911
2912         index = lba_to_map_index(lba);
2913         mapped = test_bit(index, map_storep);
2914
2915         if (mapped)
2916                 next = find_next_zero_bit(map_storep, map_size, index);
2917         else
2918                 next = find_next_bit(map_storep, map_size, index);
2919
2920         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2921         *num = end - lba;
2922         return mapped;
2923 }
2924
2925 static void map_region(sector_t lba, unsigned int len)
2926 {
2927         sector_t end = lba + len;
2928
2929         while (lba < end) {
2930                 unsigned long index = lba_to_map_index(lba);
2931
2932                 if (index < map_size)
2933                         set_bit(index, map_storep);
2934
2935                 lba = map_index_to_lba(index + 1);
2936         }
2937 }
2938
2939 static void unmap_region(sector_t lba, unsigned int len)
2940 {
2941         sector_t end = lba + len;
2942
2943         while (lba < end) {
2944                 unsigned long index = lba_to_map_index(lba);
2945
2946                 if (lba == map_index_to_lba(index) &&
2947                     lba + sdebug_unmap_granularity <= end &&
2948                     index < map_size) {
2949                         clear_bit(index, map_storep);
2950                         if (sdebug_lbprz) {  /* LBPRZ=1: fill 0x0; LBPRZ=2: fill 0xff */
2951                                 memset(fake_storep +
2952                                        lba * sdebug_sector_size,
2953                                        (sdebug_lbprz & 1) ? 0 : 0xff,
2954                                        sdebug_sector_size *
2955                                        sdebug_unmap_granularity);
2956                         }
2957                         if (dif_storep) {
2958                                 memset(dif_storep + lba, 0xff,
2959                                        sizeof(*dif_storep) *
2960                                        sdebug_unmap_granularity);
2961                         }
2962                 }
2963                 lba = map_index_to_lba(index + 1);
2964         }
2965 }
2966
2967 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2968 {
2969         u8 *cmd = scp->cmnd;
2970         u64 lba;
2971         u32 num;
2972         u32 ei_lba;
2973         unsigned long iflags;
2974         int ret;
2975         bool check_prot;
2976
2977         switch (cmd[0]) {
2978         case WRITE_16:
2979                 ei_lba = 0;
2980                 lba = get_unaligned_be64(cmd + 2);
2981                 num = get_unaligned_be32(cmd + 10);
2982                 check_prot = true;
2983                 break;
2984         case WRITE_10:
2985                 ei_lba = 0;
2986                 lba = get_unaligned_be32(cmd + 2);
2987                 num = get_unaligned_be16(cmd + 7);
2988                 check_prot = true;
2989                 break;
2990         case WRITE_6:
2991                 ei_lba = 0;
2992                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2993                       (u32)(cmd[1] & 0x1f) << 16;
2994                 num = (0 == cmd[4]) ? 256 : cmd[4];
2995                 check_prot = true;
2996                 break;
2997         case WRITE_12:
2998                 ei_lba = 0;
2999                 lba = get_unaligned_be32(cmd + 2);
3000                 num = get_unaligned_be32(cmd + 6);
3001                 check_prot = true;
3002                 break;
3003         case XDWRITEREAD_10:
3004                 ei_lba = 0;
3005                 lba = get_unaligned_be32(cmd + 2);
3006                 num = get_unaligned_be16(cmd + 7);
3007                 check_prot = false;
3008                 break;
3009         default:        /* assume WRITE(32) */
3010                 lba = get_unaligned_be64(cmd + 12);
3011                 ei_lba = get_unaligned_be32(cmd + 20);
3012                 num = get_unaligned_be32(cmd + 28);
3013                 check_prot = false;
3014                 break;
3015         }
3016         if (unlikely(have_dif_prot && check_prot)) {
3017                 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3018                     (cmd[1] & 0xe0)) {
3019                         mk_sense_invalid_opcode(scp);
3020                         return check_condition_result;
3021                 }
3022                 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3023                      sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3024                     (cmd[1] & 0xe0) == 0)
3025                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3026                                     "to DIF device\n");
3027         }
3028         ret = check_device_access_params(scp, lba, num, true);
3029         if (ret)
3030                 return ret;
3031         write_lock_irqsave(&atomic_rw, iflags);
3032
3033         /* DIX + T10 DIF */
3034         if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3035                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3036
3037                 if (prot_ret) {
3038                         write_unlock_irqrestore(&atomic_rw, iflags);
3039                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3040                         return illegal_condition_result;
3041                 }
3042         }
3043
3044         ret = do_device_access(scp, 0, lba, num, true);
3045         if (unlikely(scsi_debug_lbp()))
3046                 map_region(lba, num);
3047         write_unlock_irqrestore(&atomic_rw, iflags);
3048         if (unlikely(-1 == ret))
3049                 return DID_ERROR << 16;
3050         else if (unlikely(sdebug_verbose &&
3051                           (ret < (num * sdebug_sector_size))))
3052                 sdev_printk(KERN_INFO, scp->device,
3053                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3054                             my_name, num * sdebug_sector_size, ret);
3055
3056         if (unlikely(sdebug_any_injecting_opt)) {
3057                 struct sdebug_queued_cmd *sqcp =
3058                                 (struct sdebug_queued_cmd *)scp->host_scribble;
3059
3060                 if (sqcp) {
3061                         if (sqcp->inj_recovered) {
3062                                 mk_sense_buffer(scp, RECOVERED_ERROR,
3063                                                 THRESHOLD_EXCEEDED, 0);
3064                                 return check_condition_result;
3065                         } else if (sqcp->inj_dif) {
3066                                 /* Logical block guard check failed */
3067                                 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3068                                 return illegal_condition_result;
3069                         } else if (sqcp->inj_dix) {
3070                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3071                                 return illegal_condition_result;
3072                         }
3073                 }
3074         }
3075         return 0;
3076 }
3077
3078 /*
3079  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3080  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3081  */
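/*
 * Sketch of the data-out buffer handled below (added commentary): the first
 * lbdof logical blocks hold a 32-byte parameter list header followed by
 * num_lrd LBA range descriptors of 32 bytes each (lrd_size); the data to be
 * written for descriptor k starts at sg_off, which begins at lbdof_blen and
 * advances by num * lb_size after each descriptor is processed.
 */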
3082 static int resp_write_scat(struct scsi_cmnd *scp,
3083                            struct sdebug_dev_info *devip)
3084 {
3085         u8 *cmd = scp->cmnd;
3086         u8 *lrdp = NULL;
3087         u8 *up;
3088         u8 wrprotect;
3089         u16 lbdof, num_lrd, k;
3090         u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3091         u32 lb_size = sdebug_sector_size;
3092         u32 ei_lba;
3093         u64 lba;
3094         unsigned long iflags;
3095         int ret, res;
3096         bool is_16;
3097         static const u32 lrd_size = 32; /* + parameter list header size */
3098
3099         if (cmd[0] == VARIABLE_LENGTH_CMD) {
3100                 is_16 = false;
3101                 wrprotect = (cmd[10] >> 5) & 0x7;
3102                 lbdof = get_unaligned_be16(cmd + 12);
3103                 num_lrd = get_unaligned_be16(cmd + 16);
3104                 bt_len = get_unaligned_be32(cmd + 28);
3105         } else {        /* that leaves WRITE SCATTERED(16) */
3106                 is_16 = true;
3107                 wrprotect = (cmd[2] >> 5) & 0x7;
3108                 lbdof = get_unaligned_be16(cmd + 4);
3109                 num_lrd = get_unaligned_be16(cmd + 8);
3110                 bt_len = get_unaligned_be32(cmd + 10);
3111                 if (unlikely(have_dif_prot)) {
3112                         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3113                             wrprotect) {
3114                                 mk_sense_invalid_opcode(scp);
3115                                 return illegal_condition_result;
3116                         }
3117                         if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3118                              sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3119                              wrprotect == 0)
3120                                 sdev_printk(KERN_ERR, scp->device,
3121                                             "Unprotected WR to DIF device\n");
3122                 }
3123         }
3124         if ((num_lrd == 0) || (bt_len == 0))
3125                 return 0;       /* T10 says these do-nothings are not errors */
3126         if (lbdof == 0) {
3127                 if (sdebug_verbose)
3128                         sdev_printk(KERN_INFO, scp->device,
3129                                 "%s: %s: LB Data Offset field bad\n",
3130                                 my_name, __func__);
3131                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3132                 return illegal_condition_result;
3133         }
3134         lbdof_blen = lbdof * lb_size;
3135         if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3136                 if (sdebug_verbose)
3137                         sdev_printk(KERN_INFO, scp->device,
3138                                 "%s: %s: LBA range descriptors don't fit\n",
3139                                 my_name, __func__);
3140                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3141                 return illegal_condition_result;
3142         }
3143         lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3144         if (lrdp == NULL)
3145                 return SCSI_MLQUEUE_HOST_BUSY;
3146         if (sdebug_verbose)
3147                 sdev_printk(KERN_INFO, scp->device,
3148                         "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3149                         my_name, __func__, lbdof_blen);
3150         res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3151         if (res == -1) {
3152                 ret = DID_ERROR << 16;
3153                 goto err_out;
3154         }
3155
3156         write_lock_irqsave(&atomic_rw, iflags);
3157         sg_off = lbdof_blen;
3158         /* Per spec, the Buffer Transfer Length field counts LBs in the dout buffer */
3159         cum_lb = 0;
3160         for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3161                 lba = get_unaligned_be64(up + 0);
3162                 num = get_unaligned_be32(up + 8);
3163                 if (sdebug_verbose)
3164                         sdev_printk(KERN_INFO, scp->device,
3165                                 "%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3166                                 my_name, __func__, k, lba, num, sg_off);
3167                 if (num == 0)
3168                         continue;
3169                 ret = check_device_access_params(scp, lba, num, true);
3170                 if (ret)
3171                         goto err_out_unlock;
3172                 num_by = num * lb_size;
3173                 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3174
3175                 if ((cum_lb + num) > bt_len) {
3176                         if (sdebug_verbose)
3177                                 sdev_printk(KERN_INFO, scp->device,
3178                                     "%s: %s: sum of blocks > data provided\n",
3179                                     my_name, __func__);
3180                         mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3181                                         0);
3182                         ret = illegal_condition_result;
3183                         goto err_out_unlock;
3184                 }
3185
3186                 /* DIX + T10 DIF */
3187                 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3188                         int prot_ret = prot_verify_write(scp, lba, num,
3189                                                          ei_lba);
3190
3191                         if (prot_ret) {
3192                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3193                                                 prot_ret);
3194                                 ret = illegal_condition_result;
3195                                 goto err_out_unlock;
3196                         }
3197                 }
3198
3199                 ret = do_device_access(scp, sg_off, lba, num, true);
3200                 if (unlikely(scsi_debug_lbp()))
3201                         map_region(lba, num);
3202                 if (unlikely(-1 == ret)) {
3203                         ret = DID_ERROR << 16;
3204                         goto err_out_unlock;
3205                 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3206                         sdev_printk(KERN_INFO, scp->device,
3207                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3208                             my_name, num_by, ret);
3209
3210                 if (unlikely(sdebug_any_injecting_opt)) {
3211                         struct sdebug_queued_cmd *sqcp =
3212                                 (struct sdebug_queued_cmd *)scp->host_scribble;
3213
3214                         if (sqcp) {
3215                                 if (sqcp->inj_recovered) {
3216                                         mk_sense_buffer(scp, RECOVERED_ERROR,
3217                                                         THRESHOLD_EXCEEDED, 0);
3218                                         ret = illegal_condition_result;
3219                                         goto err_out_unlock;
3220                                 } else if (sqcp->inj_dif) {
3221                                         /* Logical block guard check failed */
3222                                         mk_sense_buffer(scp, ABORTED_COMMAND,
3223                                                         0x10, 1);
3224                                         ret = illegal_condition_result;
3225                                         goto err_out_unlock;
3226                                 } else if (sqcp->inj_dix) {
3227                                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
3228                                                         0x10, 1);
3229                                         ret = illegal_condition_result;
3230                                         goto err_out_unlock;
3231                                 }
3232                         }
3233                 }
3234                 sg_off += num_by;
3235                 cum_lb += num;
3236         }
3237         ret = 0;
3238 err_out_unlock:
3239         write_unlock_irqrestore(&atomic_rw, iflags);
3240 err_out:
3241         kfree(lrdp);
3242         return ret;
3243 }
3244
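/*
 * Added summary: resp_write_same() services WRITE SAME(10/16).  If the UNMAP
 * bit was set and logical block provisioning is active the range is simply
 * unmapped; otherwise the first block is zeroed (NDOB) or fetched from the
 * data-out buffer, then replicated to the remaining blocks with memmove(),
 * wrapping modulo sdebug_store_sectors.
 */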
3245 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3246                            u32 ei_lba, bool unmap, bool ndob)
3247 {
3248         int ret;
3249         unsigned long iflags;
3250         unsigned long long i;
3251         u32 lb_size = sdebug_sector_size;
3252         u64 block, lbaa;
3253         u8 *fs1p;
3254
3255         ret = check_device_access_params(scp, lba, num, true);
3256         if (ret)
3257                 return ret;
3258
3259         write_lock_irqsave(&atomic_rw, iflags);
3260
3261         if (unmap && scsi_debug_lbp()) {
3262                 unmap_region(lba, num);
3263                 goto out;
3264         }
3265         lbaa = lba;
3266         block = do_div(lbaa, sdebug_store_sectors);
3267         /* if ndob then zero 1 logical block, else fetch 1 logical block */
3268         fs1p = fake_storep + (block * lb_size);
3269         if (ndob) {
3270                 memset(fs1p, 0, lb_size);
3271                 ret = 0;
3272         } else
3273                 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3274
3275         if (-1 == ret) {
3276                 write_unlock_irqrestore(&atomic_rw, iflags);
3277                 return DID_ERROR << 16;
3278         } else if (sdebug_verbose && !ndob && (ret < lb_size))
3279                 sdev_printk(KERN_INFO, scp->device,
3280                             "%s: %s: lb size=%u, IO sent=%d bytes\n",
3281                             my_name, "write same", lb_size, ret);
3282
3283         /* Copy first sector to remaining blocks */
3284         for (i = 1 ; i < num ; i++) {
3285                 lbaa = lba + i;
3286                 block = do_div(lbaa, sdebug_store_sectors);
3287                 memmove(fake_storep + (block * lb_size), fs1p, lb_size);
3288         }
3289         if (scsi_debug_lbp())
3290                 map_region(lba, num);
3291 out:
3292         write_unlock_irqrestore(&atomic_rw, iflags);
3293
3294         return 0;
3295 }
3296
3297 static int resp_write_same_10(struct scsi_cmnd *scp,
3298                               struct sdebug_dev_info *devip)
3299 {
3300         u8 *cmd = scp->cmnd;
3301         u32 lba;
3302         u16 num;
3303         u32 ei_lba = 0;
3304         bool unmap = false;
3305
3306         if (cmd[1] & 0x8) {
3307                 if (sdebug_lbpws10 == 0) {
3308                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3309                         return check_condition_result;
3310                 } else
3311                         unmap = true;
3312         }
3313         lba = get_unaligned_be32(cmd + 2);
3314         num = get_unaligned_be16(cmd + 7);
3315         if (num > sdebug_write_same_length) {
3316                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3317                 return check_condition_result;
3318         }
3319         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3320 }
3321
3322 static int resp_write_same_16(struct scsi_cmnd *scp,
3323                               struct sdebug_dev_info *devip)
3324 {
3325         u8 *cmd = scp->cmnd;
3326         u64 lba;
3327         u32 num;
3328         u32 ei_lba = 0;
3329         bool unmap = false;
3330         bool ndob = false;
3331
3332         if (cmd[1] & 0x8) {     /* UNMAP */
3333                 if (sdebug_lbpws == 0) {
3334                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3335                         return check_condition_result;
3336                 } else
3337                         unmap = true;
3338         }
3339         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3340                 ndob = true;
3341         lba = get_unaligned_be64(cmd + 2);
3342         num = get_unaligned_be32(cmd + 10);
3343         if (num > sdebug_write_same_length) {
3344                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3345                 return check_condition_result;
3346         }
3347         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3348 }
3349
3350 /* Note the mode field is in the same position as the (lower) service action
3351  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3352  * each mode of this command should be reported separately; left for future work. */
3353 static int resp_write_buffer(struct scsi_cmnd *scp,
3354                              struct sdebug_dev_info *devip)
3355 {
3356         u8 *cmd = scp->cmnd;
3357         struct scsi_device *sdp = scp->device;
3358         struct sdebug_dev_info *dp;
3359         u8 mode;
3360
3361         mode = cmd[1] & 0x1f;
3362         switch (mode) {
3363         case 0x4:       /* download microcode (MC) and activate (ACT) */
3364                 /* set UAs on this device only */
3365                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3366                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3367                 break;
3368         case 0x5:       /* download MC, save and ACT */
3369                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3370                 break;
3371         case 0x6:       /* download MC with offsets and ACT */
3372                 /* set UAs on most devices (LUs) in this target */
3373                 list_for_each_entry(dp,
3374                                     &devip->sdbg_host->dev_info_list,
3375                                     dev_list)
3376                         if (dp->target == sdp->id) {
3377                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3378                                 if (devip != dp)
3379                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3380                                                 dp->uas_bm);
3381                         }
3382                 break;
3383         case 0x7:       /* download MC with offsets, save, and ACT */
3384                 /* set UA on all devices (LUs) in this target */
3385                 list_for_each_entry(dp,
3386                                     &devip->sdbg_host->dev_info_list,
3387                                     dev_list)
3388                         if (dp->target == sdp->id)
3389                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3390                                         dp->uas_bm);
3391                 break;
3392         default:
3393                 /* do nothing for this command for other mode values */
3394                 break;
3395         }
3396         return 0;
3397 }
3398
3399 static int resp_comp_write(struct scsi_cmnd *scp,
3400                            struct sdebug_dev_info *devip)
3401 {
3402         u8 *cmd = scp->cmnd;
3403         u8 *arr;
3404         u8 *fake_storep_hold;
3405         u64 lba;
3406         u32 dnum;
3407         u32 lb_size = sdebug_sector_size;
3408         u8 num;
3409         unsigned long iflags;
3410         int ret;
3411         int retval = 0;
3412
3413         lba = get_unaligned_be64(cmd + 2);
3414         num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3415         if (0 == num)
3416                 return 0;       /* degenerate case, not an error */
3417         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3418             (cmd[1] & 0xe0)) {
3419                 mk_sense_invalid_opcode(scp);
3420                 return check_condition_result;
3421         }
3422         if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3423              sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3424             (cmd[1] & 0xe0) == 0)
3425                 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3426                             "to DIF device\n");
3427         ret = check_device_access_params(scp, lba, num, false);
3428         if (ret)
3429                 return ret;
3430         dnum = 2 * num;
3431         arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3432         if (NULL == arr) {
3433                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3434                                 INSUFF_RES_ASCQ);
3435                 return check_condition_result;
3436         }
3437
3438         write_lock_irqsave(&atomic_rw, iflags);
3439
3440         /* trick do_device_access() to fetch both compare and write buffers
3441          * from data-in into arr. Safe (atomic) since write_lock held. */
3442         fake_storep_hold = fake_storep;
3443         fake_storep = arr;
3444         ret = do_device_access(scp, 0, 0, dnum, true);
3445         fake_storep = fake_storep_hold;
3446         if (ret == -1) {
3447                 retval = DID_ERROR << 16;
3448                 goto cleanup;
3449         } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3450                 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3451                             "indicated=%u, IO sent=%d bytes\n", my_name,
3452                             dnum * lb_size, ret);
3453         if (!comp_write_worker(lba, num, arr)) {
3454                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3455                 retval = check_condition_result;
3456                 goto cleanup;
3457         }
3458         if (scsi_debug_lbp())
3459                 map_region(lba, num);
3460 cleanup:
3461         write_unlock_irqrestore(&atomic_rw, iflags);
3462         kfree(arr);
3463         return retval;
3464 }
3465
3466 struct unmap_block_desc {
3467         __be64  lba;
3468         __be32  blocks;
3469         __be32  __reserved;
3470 };
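
/*
 * UNMAP parameter list layout as consumed by resp_unmap() below (added
 * commentary): an 8-byte header whose first two fields are the unmap data
 * length (payload_len - 2) and the block descriptor data length
 * (descriptors * 16), followed by one 16-byte unmap_block_desc per range.
 */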
3471
3472 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3473 {
3474         unsigned char *buf;
3475         struct unmap_block_desc *desc;
3476         unsigned int i, payload_len, descriptors;
3477         int ret;
3478         unsigned long iflags;
3479
3480
3481         if (!scsi_debug_lbp())
3482                 return 0;       /* fib and say it's done */
3483         payload_len = get_unaligned_be16(scp->cmnd + 7);
3484         BUG_ON(scsi_bufflen(scp) != payload_len);
3485
3486         descriptors = (payload_len - 8) / 16;
3487         if (descriptors > sdebug_unmap_max_desc) {
3488                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3489                 return check_condition_result;
3490         }
3491
3492         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3493         if (!buf) {
3494                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3495                                 INSUFF_RES_ASCQ);
3496                 return check_condition_result;
3497         }
3498
3499         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3500
3501         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3502         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3503
3504         desc = (void *)&buf[8];
3505
3506         write_lock_irqsave(&atomic_rw, iflags);
3507
3508         for (i = 0 ; i < descriptors ; i++) {
3509                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3510                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3511
3512                 ret = check_device_access_params(scp, lba, num, true);
3513                 if (ret)
3514                         goto out;
3515
3516                 unmap_region(lba, num);
3517         }
3518
3519         ret = 0;
3520
3521 out:
3522         write_unlock_irqrestore(&atomic_rw, iflags);
3523         kfree(buf);
3524
3525         return ret;
3526 }
3527
3528 #define SDEBUG_GET_LBA_STATUS_LEN 32
3529
3530 static int resp_get_lba_status(struct scsi_cmnd *scp,
3531                                struct sdebug_dev_info *devip)
3532 {
3533         u8 *cmd = scp->cmnd;
3534         u64 lba;
3535         u32 alloc_len, mapped, num;
3536         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3537         int ret;
3538
3539         lba = get_unaligned_be64(cmd + 2);
3540         alloc_len = get_unaligned_be32(cmd + 10);
3541
3542         if (alloc_len < 24)
3543                 return 0;
3544
3545         ret = check_device_access_params(scp, lba, 1, false);
3546         if (ret)
3547                 return ret;
3548
3549         if (scsi_debug_lbp())
3550                 mapped = map_state(lba, &num);
3551         else {
3552                 mapped = 1;
3553                 /* following just in case virtual_gb changed */
3554                 sdebug_capacity = get_sdebug_capacity();
3555                 if (sdebug_capacity - lba <= 0xffffffff)
3556                         num = sdebug_capacity - lba;
3557                 else
3558                         num = 0xffffffff;
3559         }
3560
3561         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3562         put_unaligned_be32(20, arr);            /* Parameter Data Length */
3563         put_unaligned_be64(lba, arr + 8);       /* LBA */
3564         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3565         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3566
3567         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3568 }
3569
3570 static int resp_sync_cache(struct scsi_cmnd *scp,
3571                            struct sdebug_dev_info *devip)
3572 {
3573         int res = 0;
3574         u64 lba;
3575         u32 num_blocks;
3576         u8 *cmd = scp->cmnd;
3577
3578         if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
3579                 lba = get_unaligned_be32(cmd + 2);
3580                 num_blocks = get_unaligned_be16(cmd + 7);
3581         } else {                                /* SYNCHRONIZE_CACHE(16) */
3582                 lba = get_unaligned_be64(cmd + 2);
3583                 num_blocks = get_unaligned_be32(cmd + 10);
3584         }
3585         if (lba + num_blocks > sdebug_capacity) {
3586                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3587                 return check_condition_result;
3588         }
3589         if (!write_since_sync || cmd[1] & 0x2)
3590                 res = SDEG_RES_IMMED_MASK;
3591         else            /* delay if write_since_sync and IMMED clear */
3592                 write_since_sync = false;
3593         return res;
3594 }
3595
3596 #define RL_BUCKET_ELEMS 8
3597
3598 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3599  * (W-LUN), the normal Linux scanning logic does not associate it with a
3600  * device (e.g. /dev/sg7). The following magic will make that association:
3601  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3602  * where <n> is a host number. If there are multiple targets in a host then
3603  * the above will associate a W-LUN to each target. To only get a W-LUN
3604  * for target 2, then use "echo '- 2 49409' > scan" .
3605  */
3606 static int resp_report_luns(struct scsi_cmnd *scp,
3607                             struct sdebug_dev_info *devip)
3608 {
3609         unsigned char *cmd = scp->cmnd;
3610         unsigned int alloc_len;
3611         unsigned char select_report;
3612         u64 lun;
3613         struct scsi_lun *lun_p;
3614         u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
3615         unsigned int lun_cnt;   /* normal LUN count (max: 256) */
3616         unsigned int wlun_cnt;  /* report luns W-LUN count */
3617         unsigned int tlun_cnt;  /* total LUN count */
3618         unsigned int rlen;      /* response length (in bytes) */
3619         int k, j, n, res;
3620         unsigned int off_rsp = 0;
3621         const int sz_lun = sizeof(struct scsi_lun);
3622
3623         clear_luns_changed_on_target(devip);
3624
3625         select_report = cmd[2];
3626         alloc_len = get_unaligned_be32(cmd + 6);
3627
3628         if (alloc_len < 4) {
3629                 pr_err("alloc len too small %d\n", alloc_len);
3630                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3631                 return check_condition_result;
3632         }
3633
3634         switch (select_report) {
3635         case 0:         /* all LUNs apart from W-LUNs */
3636                 lun_cnt = sdebug_max_luns;
3637                 wlun_cnt = 0;
3638                 break;
3639         case 1:         /* only W-LUNs */
3640                 lun_cnt = 0;
3641                 wlun_cnt = 1;
3642                 break;
3643         case 2:         /* all LUNs */
3644                 lun_cnt = sdebug_max_luns;
3645                 wlun_cnt = 1;
3646                 break;
3647         case 0x10:      /* only administrative LUs */
3648         case 0x11:      /* see SPC-5 */
3649         case 0x12:      /* only subsidiary LUs owned by referenced LU */
3650         default:
3651                 pr_debug("select report invalid %d\n", select_report);
3652                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3653                 return check_condition_result;
3654         }
3655
3656         if (sdebug_no_lun_0 && (lun_cnt > 0))
3657                 --lun_cnt;
3658
3659         tlun_cnt = lun_cnt + wlun_cnt;
3660         rlen = tlun_cnt * sz_lun;       /* excluding 8 byte header */
3661         scsi_set_resid(scp, scsi_bufflen(scp));
3662         pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3663                  select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3664
3665         /* loops rely on sizeof response header same as sizeof lun (both 8) */
3666         lun = sdebug_no_lun_0 ? 1 : 0;
3667         for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
3668                 memset(arr, 0, sizeof(arr));
3669                 lun_p = (struct scsi_lun *)&arr[0];
3670                 if (k == 0) {
3671                         put_unaligned_be32(rlen, &arr[0]);
3672                         ++lun_p;
3673                         j = 1;
3674                 }
3675                 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
3676                         if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
3677                                 break;
3678                         int_to_scsilun(lun++, lun_p);
3679                 }
3680                 if (j < RL_BUCKET_ELEMS)
3681                         break;
3682                 n = j * sz_lun;
3683                 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
3684                 if (res)
3685                         return res;
3686                 off_rsp += n;
3687         }
3688         if (wlun_cnt) {
3689                 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
3690                 ++j;
3691         }
3692         if (j > 0)
3693                 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
3694         return res;
3695 }
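
/*
 * Illustrative walk-through (added, assuming sdebug_max_luns == 5,
 * sdebug_no_lun_0 == 0 and SELECT REPORT == 0): lun_cnt == 5, rlen == 40 and
 * the single bucket sent by the final p_fill_from_dev_buffer() call carries
 * the 8-byte header followed by LUN entries 0..4, i.e. 48 bytes in total.
 */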
3696
3697 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3698 {
3699         u32 tag = blk_mq_unique_tag(cmnd->request);
3700         u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3701
3702         pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3703         if (WARN_ON_ONCE(hwq >= submit_queues))
3704                 hwq = 0;
3705         return sdebug_q_arr + hwq;
3706 }
3707
3708 /* Queued (deferred) command completions converge here. */
3709 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3710 {
3711         bool aborted = sd_dp->aborted;
3712         int qc_idx;
3713         int retiring = 0;
3714         unsigned long iflags;
3715         struct sdebug_queue *sqp;
3716         struct sdebug_queued_cmd *sqcp;
3717         struct scsi_cmnd *scp;
3718         struct sdebug_dev_info *devip;
3719
3720         sd_dp->defer_t = SDEB_DEFER_NONE;
3721         if (unlikely(aborted))
3722                 sd_dp->aborted = false;
3723         qc_idx = sd_dp->qc_idx;
3724         sqp = sdebug_q_arr + sd_dp->sqa_idx;
3725         if (sdebug_statistics) {
3726                 atomic_inc(&sdebug_completions);
3727                 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3728                         atomic_inc(&sdebug_miss_cpus);
3729         }
3730         if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3731                 pr_err("wild qc_idx=%d\n", qc_idx);
3732                 return;
3733         }
3734         spin_lock_irqsave(&sqp->qc_lock, iflags);
3735         sqcp = &sqp->qc_arr[qc_idx];
3736         scp = sqcp->a_cmnd;
3737         if (unlikely(scp == NULL)) {
3738                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3739                 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3740                        sd_dp->sqa_idx, qc_idx);
3741                 return;
3742         }
3743         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3744         if (likely(devip))
3745                 atomic_dec(&devip->num_in_q);
3746         else
3747                 pr_err("devip=NULL\n");
3748         if (unlikely(atomic_read(&retired_max_queue) > 0))
3749                 retiring = 1;
3750
3751         sqcp->a_cmnd = NULL;
3752         if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3753                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3754                 pr_err("Unexpected completion\n");
3755                 return;
3756         }
3757
3758         if (unlikely(retiring)) {       /* user has reduced max_queue */
3759                 int k, retval;
3760
3761                 retval = atomic_read(&retired_max_queue);
3762                 if (qc_idx >= retval) {
3763                         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3764                         pr_err("index %d too large\n", retval);
3765                         return;
3766                 }
3767                 k = find_last_bit(sqp->in_use_bm, retval);
3768                 if ((k < sdebug_max_queue) || (k == retval))
3769                         atomic_set(&retired_max_queue, 0);
3770                 else
3771                         atomic_set(&retired_max_queue, k + 1);
3772         }
3773         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3774         if (unlikely(aborted)) {
3775                 if (sdebug_verbose)
3776                         pr_info("bypassing scsi_done() due to aborted cmd\n");
3777                 return;
3778         }
3779         scp->scsi_done(scp); /* callback to mid level */
3780 }
3781
3782 /* When high resolution timer goes off this function is called. */
3783 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3784 {
3785         struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3786                                                   hrt);
3787         sdebug_q_cmd_complete(sd_dp);
3788         return HRTIMER_NORESTART;
3789 }
3790
3791 /* When work queue schedules work, it calls this function. */
3792 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3793 {
3794         struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3795                                                   ew.work);
3796         sdebug_q_cmd_complete(sd_dp);
3797 }
3798
3799 static bool got_shared_uuid;
3800 static uuid_t shared_uuid;
3801
3802 static struct sdebug_dev_info *sdebug_device_create(
3803                         struct sdebug_host_info *sdbg_host, gfp_t flags)
3804 {
3805         struct sdebug_dev_info *devip;
3806
3807         devip = kzalloc(sizeof(*devip), flags);
3808         if (devip) {
3809                 if (sdebug_uuid_ctl == 1)
3810                         uuid_gen(&devip->lu_name);
3811                 else if (sdebug_uuid_ctl == 2) {
3812                         if (got_shared_uuid)
3813                                 devip->lu_name = shared_uuid;
3814                         else {
3815                                 uuid_gen(&shared_uuid);
3816                                 got_shared_uuid = true;
3817                                 devip->lu_name = shared_uuid;
3818                         }
3819                 }
3820                 devip->sdbg_host = sdbg_host;
3821                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3822         }
3823         return devip;
3824 }
3825
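/* Find the sdebug_dev_info matching @sdev's channel/target/lun. If none is
 * found, reuse an unused entry or allocate a new one, mark it used and raise
 * a power-on-reset unit attention on it.
 */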
3826 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3827 {
3828         struct sdebug_host_info *sdbg_host;
3829         struct sdebug_dev_info *open_devip = NULL;
3830         struct sdebug_dev_info *devip;
3831
3832         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3833         if (!sdbg_host) {
3834                 pr_err("Host info NULL\n");
3835                 return NULL;
3836         }
3837         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3838                 if ((devip->used) && (devip->channel == sdev->channel) &&
3839                     (devip->target == sdev->id) &&
3840                     (devip->lun == sdev->lun))
3841                         return devip;
3842                 else {
3843                         if ((!devip->used) && (!open_devip))
3844                                 open_devip = devip;
3845                 }
3846         }
3847         if (!open_devip) { /* try and make a new one */
3848                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3849                 if (!open_devip) {
3850                         pr_err("out of memory at line %d\n", __LINE__);
3851                         return NULL;
3852                 }
3853         }
3854
3855         open_devip->channel = sdev->channel;
3856         open_devip->target = sdev->id;
3857         open_devip->lun = sdev->lun;
3858         open_devip->sdbg_host = sdbg_host;
3859         atomic_set(&open_devip->num_in_q, 0);
3860         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3861         open_devip->used = true;
3862         return open_devip;
3863 }
3864
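/* Host template callbacks invoked by the SCSI mid-layer while a device is
 * scanned (slave_alloc), configured (slave_configure) and torn down
 * (slave_destroy).
 */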
3865 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3866 {
3867         if (sdebug_verbose)
3868                 pr_info("slave_alloc <%u %u %u %llu>\n",
3869                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3870         return 0;
3871 }
3872
3873 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3874 {
3875         struct sdebug_dev_info *devip =
3876                         (struct sdebug_dev_info *)sdp->hostdata;
3877
3878         if (sdebug_verbose)
3879                 pr_info("slave_configure <%u %u %u %llu>\n",
3880                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3881         if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3882                 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3883         if (devip == NULL) {
3884                 devip = find_build_dev_info(sdp);
3885                 if (devip == NULL)
3886                         return 1;  /* no resources, will be marked offline */
3887         }
3888         sdp->hostdata = devip;
3889         if (sdebug_no_uld)
3890                 sdp->no_uld_attach = 1;
3891         config_cdb_len(sdp);
3892         return 0;
3893 }
3894
3895 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3896 {
3897         struct sdebug_dev_info *devip =
3898                 (struct sdebug_dev_info *)sdp->hostdata;
3899
3900         if (sdebug_verbose)
3901                 pr_info("slave_destroy <%u %u %u %llu>\n",
3902                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3903         if (devip) {
3904                 /* make this slot available for re-use */
3905                 devip->used = false;
3906                 sdp->hostdata = NULL;
3907         }
3908 }
3909
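/* Cancel the deferred completion (hrtimer or work item) described by @sd_dp,
 * according to @defer_t. Safe to call with a NULL @sd_dp.
 */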
3910 static void stop_qc_helper(struct sdebug_defer *sd_dp,
3911                            enum sdeb_defer_type defer_t)
3912 {
3913         if (!sd_dp)
3914                 return;
3915         if (defer_t == SDEB_DEFER_HRT)
3916                 hrtimer_cancel(&sd_dp->hrt);
3917         else if (defer_t == SDEB_DEFER_WQ)
3918                 cancel_work_sync(&sd_dp->ew.work);
3919 }
3920
3921 /* If @cmnd is found, delete its timer or work-queue entry and return true;
3922  * otherwise return false. */
3923 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3924 {
3925         unsigned long iflags;
3926         int j, k, qmax, r_qmax;
3927         enum sdeb_defer_type l_defer_t;
3928         struct sdebug_queue *sqp;
3929         struct sdebug_queued_cmd *sqcp;
3930         struct sdebug_dev_info *devip;
3931         struct sdebug_defer *sd_dp;
3932
3933         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3934                 spin_lock_irqsave(&sqp->qc_lock, iflags);
3935                 qmax = sdebug_max_queue;
3936                 r_qmax = atomic_read(&retired_max_queue);
3937                 if (r_qmax > qmax)
3938                         qmax = r_qmax;
3939                 for (k = 0; k < qmax; ++k) {
3940                         if (test_bit(k, sqp->in_use_bm)) {
3941                                 sqcp = &sqp->qc_arr[k];
3942                                 if (cmnd != sqcp->a_cmnd)
3943                                         continue;
3944                                 /* found */
3945                                 devip = (struct sdebug_dev_info *)
3946                                                 cmnd->device->hostdata;
3947                                 if (devip)
3948                                         atomic_dec(&devip->num_in_q);
3949                                 sqcp->a_cmnd = NULL;
3950                                 sd_dp = sqcp->sd_dp;
3951                                 if (sd_dp) {
3952                                         l_defer_t = sd_dp->defer_t;
3953                                         sd_dp->defer_t = SDEB_DEFER_NONE;
3954                                 } else
3955                                         l_defer_t = SDEB_DEFER_NONE;
3956                                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3957                                 stop_qc_helper(sd_dp, l_defer_t);
3958                                 clear_bit(k, sqp->in_use_bm);
3959                                 return true;
3960                         }
3961                 }
3962                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3963         }
3964         return false;
3965 }
3966
3967 /* Deletes (stops) timers or work queues of all queued commands */
3968 static void stop_all_queued(void)
3969 {
3970         unsigned long iflags;
3971         int j, k;
3972         enum sdeb_defer_type l_defer_t;
3973         struct sdebug_queue *sqp;
3974         struct sdebug_queued_cmd *sqcp;
3975         struct sdebug_dev_info *devip;
3976         struct sdebug_defer *sd_dp;
3977
3978         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3979                 spin_lock_irqsave(&sqp->qc_lock, iflags);
3980                 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3981                         if (test_bit(k, sqp->in_use_bm)) {
3982                                 sqcp = &sqp->qc_arr[k];
3983                                 if (sqcp->a_cmnd == NULL)
3984                                         continue;
3985                                 devip = (struct sdebug_dev_info *)
3986                                         sqcp->a_cmnd->device->hostdata;
3987                                 if (devip)
3988                                         atomic_dec(&devip->num_in_q);
3989                                 sqcp->a_cmnd = NULL;
3990                                 sd_dp = sqcp->sd_dp;
3991                                 if (sd_dp) {
3992                                         l_defer_t = sd_dp->defer_t;
3993                                         sd_dp->defer_t = SDEB_DEFER_NONE;
3994                                 } else
3995                                         l_defer_t = SDEB_DEFER_NONE;
3996                                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3997                                 stop_qc_helper(sd_dp, l_defer_t);
3998                                 clear_bit(k, sqp->in_use_bm);
3999                                 spin_lock_irqsave(&sqp->qc_lock, iflags);
4000                         }
4001                 }
4002                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4003         }
4004 }
4005
4006 /* Free queued command memory on heap */
4007 static void free_all_queued(void)
4008 {
4009         int j, k;
4010         struct sdebug_queue *sqp;
4011         struct sdebug_queued_cmd *sqcp;
4012
4013         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4014                 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4015                         sqcp = &sqp->qc_arr[k];
4016                         kfree(sqcp->sd_dp);
4017                         sqcp->sd_dp = NULL;
4018                 }
4019         }
4020 }
4021
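/* Error handler: abort. Try to stop the queued command; always report
 * SUCCESS to the mid-layer.
 */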
4022 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4023 {
4024         bool ok;
4025
4026         ++num_aborts;
4027         if (SCpnt) {
4028                 ok = stop_queued_cmnd(SCpnt);
4029                 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4030                         sdev_printk(KERN_INFO, SCpnt->device,
4031                                     "%s: command%s found\n", __func__,
4032                                     ok ? "" : " not");
4033         }
4034         return SUCCESS;
4035 }
4036
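/* Error handler: device reset. Raise a power-on-reset unit attention on the
 * device; always reports SUCCESS.
 */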
4037 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4038 {
4039         ++num_dev_resets;
4040         if (SCpnt && SCpnt->device) {
4041                 struct scsi_device *sdp = SCpnt->device;
4042                 struct sdebug_dev_info *devip =
4043                                 (struct sdebug_dev_info *)sdp->hostdata;
4044
4045                 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4046                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4047                 if (devip)
4048                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
4049         }
4050         return SUCCESS;
4051 }
4052
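/* Error handler: target reset. Raise a bus-reset unit attention on every LU
 * that shares the command's target id; always reports SUCCESS.
 */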
4053 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4054 {
4055         struct sdebug_host_info *sdbg_host;
4056         struct sdebug_dev_info *devip;
4057         struct scsi_device *sdp;
4058         struct Scsi_Host *hp;
4059         int k = 0;
4060
4061         ++num_target_resets;
4062         if (!SCpnt)
4063                 goto lie;
4064         sdp = SCpnt->device;
4065         if (!sdp)
4066                 goto lie;
4067         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4068                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4069         hp = sdp->host;
4070         if (!hp)
4071                 goto lie;
4072         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4073         if (sdbg_host) {
4074                 list_for_each_entry(devip,
4075                                     &sdbg_host->dev_info_list,
4076                                     dev_list)
4077                         if (devip->target == sdp->id) {
4078                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4079                                 ++k;
4080                         }
4081         }
4082         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4083                 sdev_printk(KERN_INFO, sdp,
4084                             "%s: %d device(s) found in target\n", __func__, k);
4085 lie:
4086         return SUCCESS;
4087 }
4088
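/* Error handler: bus reset. Raise a bus-reset unit attention on every LU
 * attached to the command's host; always reports SUCCESS.
 */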
4089 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4090 {
4091         struct sdebug_host_info *sdbg_host;
4092         struct sdebug_dev_info *devip;
4093         struct scsi_device *sdp;
4094         struct Scsi_Host *hp;
4095         int k = 0;
4096
4097         ++num_bus_resets;
4098         if (!(SCpnt && SCpnt->device))
4099                 goto lie;
4100         sdp = SCpnt->device;
4101         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4102                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4103         hp = sdp->host;
4104         if (hp) {
4105                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4106                 if (sdbg_host) {
4107                         list_for_each_entry(devip,
4108                                             &sdbg_host->dev_info_list,
4109                                             dev_list) {
4110                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4111                                 ++k;
4112                         }
4113                 }
4114         }
4115         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4116                 sdev_printk(KERN_INFO, sdp,
4117                             "%s: %d device(s) found in host\n", __func__, k);
4118 lie:
4119         return SUCCESS;
4120 }
4121
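/* Error handler: host reset. Raise a bus-reset unit attention on every LU of
 * every scsi_debug host and stop all queued commands; always reports SUCCESS.
 */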
4122 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4123 {
4124         struct sdebug_host_info *sdbg_host;
4125         struct sdebug_dev_info *devip;
4126         int k = 0;
4127
4128         ++num_host_resets;
4129         if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4130                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4131         spin_lock(&sdebug_host_list_lock);
4132         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4133                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
4134                                     dev_list) {
4135                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4136                         ++k;
4137                 }
4138         }
4139         spin_unlock(&sdebug_host_list_lock);
4140         stop_all_queued();
4141         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4142                 sdev_printk(KERN_INFO, SCpnt->device,
4143                             "%s: %d device(s) found\n", __func__, k);
4144         return SUCCESS;
4145 }
4146
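/* Write a DOS-style MBR partition table at the start of the ram store,
 * splitting it into sdebug_num_parts roughly equal Linux (type 0x83)
 * partitions aligned on cylinder boundaries.
 */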
4147 static void __init sdebug_build_parts(unsigned char *ramp,
4148                                       unsigned long store_size)
4149 {
4150         struct msdos_partition *pp;
4151         int starts[SDEBUG_MAX_PARTS + 2];
4152         int sectors_per_part, num_sectors, k;
4153         int heads_by_sects, start_sec, end_sec;
4154
4155         /* assume partition table already zeroed */
4156         if ((sdebug_num_parts < 1) || (store_size < 1048576))
4157                 return;
4158         if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
4159                 sdebug_num_parts = SDEBUG_MAX_PARTS;
4160                 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
4161         }
4162         num_sectors = (int)sdebug_store_sectors;
4163         sectors_per_part = (num_sectors - sdebug_sectors_per)
4164                            / sdebug_num_parts;
4165         heads_by_sects = sdebug_heads * sdebug_sectors_per;
4166         starts[0] = sdebug_sectors_per;
4167         for (k = 1; k < sdebug_num_parts; ++k)
4168                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
4169                             * heads_by_sects;
4170         starts[sdebug_num_parts] = num_sectors;
4171         starts[sdebug_num_parts + 1] = 0;
4172
4173         ramp[510] = 0x55;       /* magic partition markings */
4174         ramp[511] = 0xAA;
4175         pp = (struct msdos_partition *)(ramp + 0x1be);
4176         for (k = 0; starts[k + 1]; ++k, ++pp) {
4177                 start_sec = starts[k];
4178                 end_sec = starts[k + 1] - 1;
4179                 pp->boot_ind = 0;
4180
4181                 pp->cyl = start_sec / heads_by_sects;
4182                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
4183                            / sdebug_sectors_per;
4184                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
4185
4186                 pp->end_cyl = end_sec / heads_by_sects;
4187                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
4188                                / sdebug_sectors_per;
4189                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
4190
4191                 pp->start_sect = cpu_to_le32(start_sec);
4192                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
4193                 pp->sys_ind = 0x83;     /* plain Linux partition */
4194         }
4195 }
4196
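/* Set or clear the 'blocked' flag on every submission queue; while blocked,
 * schedule_resp() returns SCSI_MLQUEUE_HOST_BUSY so queue state can be
 * changed safely.
 */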
4197 static void block_unblock_all_queues(bool block)
4198 {
4199         int j;
4200         struct sdebug_queue *sqp;
4201
4202         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4203                 atomic_set(&sqp->blocked, (int)block);
4204 }
4205
4206 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4207  * commands will be processed normally before triggers occur.
4208  */
4209 static void tweak_cmnd_count(void)
4210 {
4211         int count, modulo;
4212
4213         modulo = abs(sdebug_every_nth);
4214         if (modulo < 2)
4215                 return;
4216         block_unblock_all_queues(true);
4217         count = atomic_read(&sdebug_cmnd_count);
4218         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4219         block_unblock_all_queues(false);
4220 }
4221
4222 static void clear_queue_stats(void)
4223 {
4224         atomic_set(&sdebug_cmnd_count, 0);
4225         atomic_set(&sdebug_completions, 0);
4226         atomic_set(&sdebug_miss_cpus, 0);
4227         atomic_set(&sdebug_a_tsf, 0);
4228 }
4229
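/* Based on sdebug_every_nth and the running command count, decide which
 * error-injection flags (recovered, transport, dif, dix, short transfer,
 * host busy, command abort) to set on this queued command.
 */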
4230 static void setup_inject(struct sdebug_queue *sqp,
4231                          struct sdebug_queued_cmd *sqcp)
4232 {
4233         if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4234                 if (sdebug_every_nth > 0)
4235                         sqcp->inj_recovered = sqcp->inj_transport
4236                                 = sqcp->inj_dif
4237                                 = sqcp->inj_dix = sqcp->inj_short
4238                                 = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
4239                 return;
4240         }
4241         sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4242         sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4243         sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4244         sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4245         sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4246         sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4247         sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
4248 }
4249
4250 /* Complete the processing of the thread that queued a SCSI command to this
4251  * driver. It either completes the command in the calling thread (invoking
4252  * scsi_done() directly) or schedules a hrtimer or work-queue deferral and
4253  * returns 0. Returns SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4254  */
4255 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4256                          int scsi_result,
4257                          int (*pfp)(struct scsi_cmnd *,
4258                                     struct sdebug_dev_info *),
4259                          int delta_jiff, int ndelay)
4260 {
4261         unsigned long iflags;
4262         int k, num_in_q, qdepth, inject;
4263         struct sdebug_queue *sqp;
4264         struct sdebug_queued_cmd *sqcp;
4265         struct scsi_device *sdp;
4266         struct sdebug_defer *sd_dp;
4267
4268         if (unlikely(devip == NULL)) {
4269                 if (scsi_result == 0)
4270                         scsi_result = DID_NO_CONNECT << 16;
4271                 goto respond_in_thread;
4272         }
4273         sdp = cmnd->device;
4274
4275         if (delta_jiff == 0)
4276                 goto respond_in_thread;
4277
4278         /* schedule the response at a later time if resources permit */
4279         sqp = get_queue(cmnd);
4280         spin_lock_irqsave(&sqp->qc_lock, iflags);
4281         if (unlikely(atomic_read(&sqp->blocked))) {
4282                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4283                 return SCSI_MLQUEUE_HOST_BUSY;
4284         }
4285         num_in_q = atomic_read(&devip->num_in_q);
4286         qdepth = cmnd->device->queue_depth;
4287         inject = 0;
4288         if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4289                 if (scsi_result) {
4290                         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4291                         goto respond_in_thread;
4292                 } else
4293                         scsi_result = device_qfull_result;
4294         } else if (unlikely(sdebug_every_nth &&
4295                             (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4296                             (scsi_result == 0))) {
4297                 if ((num_in_q == (qdepth - 1)) &&
4298                     (atomic_inc_return(&sdebug_a_tsf) >=
4299                      abs(sdebug_every_nth))) {
4300                         atomic_set(&sdebug_a_tsf, 0);
4301                         inject = 1;
4302                         scsi_result = device_qfull_result;
4303                 }
4304         }
4305
4306         k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4307         if (unlikely(k >= sdebug_max_queue)) {
4308                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4309                 if (scsi_result)
4310                         goto respond_in_thread;
4311                 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4312                         scsi_result = device_qfull_result;
4313                 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4314                         sdev_printk(KERN_INFO, sdp,
4315                                     "%s: max_queue=%d exceeded, %s\n",
4316                                     __func__, sdebug_max_queue,
4317                                     (scsi_result ?  "status: TASK SET FULL" :
4318                                                     "report: host busy"));
4319                 if (scsi_result)
4320                         goto respond_in_thread;
4321                 else
4322                         return SCSI_MLQUEUE_HOST_BUSY;
4323         }
4324         __set_bit(k, sqp->in_use_bm);
4325         atomic_inc(&devip->num_in_q);
4326         sqcp = &sqp->qc_arr[k];
4327         sqcp->a_cmnd = cmnd;
4328         cmnd->host_scribble = (unsigned char *)sqcp;
4329         sd_dp = sqcp->sd_dp;
4330         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4331         if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4332                 setup_inject(sqp, sqcp);
4333         if (sd_dp == NULL) {
4334                 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4335                 if (sd_dp == NULL)
4336                         return SCSI_MLQUEUE_HOST_BUSY;
4337         }
4338
4339         cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4340         if (cmnd->result & SDEG_RES_IMMED_MASK) {
4341                 /*
4342                  * This is the F_DELAY_OVERR case. No delay.
4343                  */
4344                 cmnd->result &= ~SDEG_RES_IMMED_MASK;
4345                 delta_jiff = ndelay = 0;
4346         }
4347         if (cmnd->result == 0 && scsi_result != 0)
4348                 cmnd->result = scsi_result;
4349
4350         if (unlikely(sdebug_verbose && cmnd->result))
4351                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4352                             __func__, cmnd->result);
4353
4354         if (delta_jiff > 0 || ndelay > 0) {
4355                 ktime_t kt;
4356
4357                 if (delta_jiff > 0) {
4358                         kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4359                 } else
4360                         kt = ndelay;
4361                 if (!sd_dp->init_hrt) {
4362                         sd_dp->init_hrt = true;
4363                         sqcp->sd_dp = sd_dp;
4364                         hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4365                                      HRTIMER_MODE_REL_PINNED);
4366                         sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4367                         sd_dp->sqa_idx = sqp - sdebug_q_arr;
4368                         sd_dp->qc_idx = k;
4369                 }
4370                 if (sdebug_statistics)
4371                         sd_dp->issuing_cpu = raw_smp_processor_id();
4372                 sd_dp->defer_t = SDEB_DEFER_HRT;
4373                 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4374         } else {        /* jdelay < 0, use work queue */
4375                 if (!sd_dp->init_wq) {
4376                         sd_dp->init_wq = true;
4377                         sqcp->sd_dp = sd_dp;
4378                         sd_dp->sqa_idx = sqp - sdebug_q_arr;
4379                         sd_dp->qc_idx = k;
4380                         INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4381                 }
4382                 if (sdebug_statistics)
4383                         sd_dp->issuing_cpu = raw_smp_processor_id();
4384                 sd_dp->defer_t = SDEB_DEFER_WQ;
4385                 if (unlikely(sqcp->inj_cmd_abort))
4386                         sd_dp->aborted = true;
4387                 schedule_work(&sd_dp->ew.work);
4388                 if (unlikely(sqcp->inj_cmd_abort)) {
4389                         sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
4390                                     cmnd->request->tag);
4391                         blk_abort_request(cmnd->request);
4392                 }
4393         }
4394         if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4395                      (scsi_result == device_qfull_result)))
4396                 sdev_printk(KERN_INFO, sdp,
4397                             "%s: num_in_q=%d +1, %s%s\n", __func__,
4398                             num_in_q, (inject ? "<inject> " : ""),
4399                             "status: TASK SET FULL");
4400         return 0;
4401
4402 respond_in_thread:      /* call back to mid-layer using invocation thread */
4403         cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4404         cmnd->result &= ~SDEG_RES_IMMED_MASK;
4405         if (cmnd->result == 0 && scsi_result != 0)
4406                 cmnd->result = scsi_result;
4407         cmnd->scsi_done(cmnd);
4408         return 0;
4409 }
4410
4411 /* Note: The following macros create attribute files in the
4412  * /sys/module/scsi_debug/parameters directory. Unfortunately this
4413  * driver is unaware of a change and cannot trigger auxiliary actions
4414  * as it can when the corresponding attribute in the
4415  * /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4416  */
4417 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4418 module_param_named(ato, sdebug_ato, int, S_IRUGO);
4419 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4420 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4421 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4422 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4423 module_param_named(dif, sdebug_dif, int, S_IRUGO);
4424 module_param_named(dix, sdebug_dix, int, S_IRUGO);
4425 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4426 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4427 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4428 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4429 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4430 module_param_string(inq_vendor, sdebug_inq_vendor_id,
4431                     sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4432 module_param_string(inq_product, sdebug_inq_product_id,
4433                     sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4434 module_param_string(inq_rev, sdebug_inq_product_rev,
4435                     sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4436 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4437 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4438 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4439 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4440 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4441 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4442 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4443 module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
4444 module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
4445 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4446 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4447 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4448 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4449 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4450 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4451 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4452 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4453 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
4454 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4455 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4456 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4457 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4458 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4459 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4460 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4461 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4462 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4463 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4464 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4465 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4466 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4467 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4468                    S_IRUGO | S_IWUSR);
4469 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
4470 module_param_named(write_same_length, sdebug_write_same_length, int,
4471                    S_IRUGO | S_IWUSR);
4472
4473 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4474 MODULE_DESCRIPTION("SCSI debug adapter driver");
4475 MODULE_LICENSE("GPL");
4476 MODULE_VERSION(SDEBUG_VERSION);
4477
4478 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4479 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4480 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
4481 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4482 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4483 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4484 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4485 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4486 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4487 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4488 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4489 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4490 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4491 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
4492 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
4493 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
4494                  SDEBUG_VERSION "\")");
4495 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4496 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4497 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4498 MODULE_PARM_DESC(lbprz,
4499         "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4500 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4501 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4502 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4503 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
4504 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
4505 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4506 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4507 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4508 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4509 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4510 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4511 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4512 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4513 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
4514 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4515 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4516 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
4517 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4518 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4519 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4520 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4521 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4522 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4523 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4524 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4525 MODULE_PARM_DESC(uuid_ctl,
4526                  "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4527 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4528 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4529 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
4530 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4531
4532 #define SDEBUG_INFO_LEN 256
4533 static char sdebug_info[SDEBUG_INFO_LEN];
4534
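/* info() host template callback: return a one-off description string for
 * this adapter (driver version plus a few key parameter values).
 */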
4535 static const char *scsi_debug_info(struct Scsi_Host *shp)
4536 {
4537         int k;
4538
4539         k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4540                       my_name, SDEBUG_VERSION, sdebug_version_date);
4541         if (k >= (SDEBUG_INFO_LEN - 1))
4542                 return sdebug_info;
4543         scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4544                   "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4545                   sdebug_dev_size_mb, sdebug_opts, submit_queues,
4546                   "statistics", (int)sdebug_statistics);
4547         return sdebug_info;
4548 }
4549
4550 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4551 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4552                                  int length)
4553 {
4554         char arr[16];
4555         int opts;
4556         int minLen = length > 15 ? 15 : length;
4557
4558         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4559                 return -EACCES;
4560         memcpy(arr, buffer, minLen);
4561         arr[minLen] = '\0';
4562         if (1 != sscanf(arr, "%d", &opts))
4563                 return -EINVAL;
4564         sdebug_opts = opts;
4565         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4566         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4567         if (sdebug_every_nth != 0)
4568                 tweak_cmnd_count();
4569         return length;
4570 }
4571
4572 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4573  * same for each scsi_debug host (if more than one). Some of the counters
4574  * output here are not atomic, so they may be inaccurate on a busy system. */
4575 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4576 {
4577         int f, j, l;
4578         struct sdebug_queue *sqp;
4579
4580         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4581                    SDEBUG_VERSION, sdebug_version_date);
4582         seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4583                    sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4584                    sdebug_opts, sdebug_every_nth);
4585         seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4586                    sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4587                    sdebug_sector_size, "bytes");
4588         seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4589                    sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4590                    num_aborts);
4591         seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4592                    num_dev_resets, num_target_resets, num_bus_resets,
4593                    num_host_resets);
4594         seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4595                    dix_reads, dix_writes, dif_errors);
4596         seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
4597                    sdebug_statistics);
4598         seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4599                    atomic_read(&sdebug_cmnd_count),
4600                    atomic_read(&sdebug_completions),
4601                    "miss_cpus", atomic_read(&sdebug_miss_cpus),
4602                    atomic_read(&sdebug_a_tsf));
4603
4604         seq_printf(m, "submit_queues=%d\n", submit_queues);
4605         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4606                 seq_printf(m, "  queue %d:\n", j);
4607                 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4608                 if (f != sdebug_max_queue) {
4609                         l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4610                         seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4611                                    "first,last bits", f, l);
4612                 }
4613         }
4614         return 0;
4615 }
4616
4617 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4618 {
4619         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4620 }
4621 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4622  * of delay is jiffies.
4623  */
4624 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4625                            size_t count)
4626 {
4627         int jdelay, res;
4628
4629         if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4630                 res = count;
4631                 if (sdebug_jdelay != jdelay) {
4632                         int j, k;
4633                         struct sdebug_queue *sqp;
4634
4635                         block_unblock_all_queues(true);
4636                         for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4637                              ++j, ++sqp) {
4638                                 k = find_first_bit(sqp->in_use_bm,
4639                                                    sdebug_max_queue);
4640                                 if (k != sdebug_max_queue) {
4641                                         res = -EBUSY;   /* queued commands */
4642                                         break;
4643                                 }
4644                         }
4645                         if (res > 0) {
4646                                 sdebug_jdelay = jdelay;
4647                                 sdebug_ndelay = 0;
4648                         }
4649                         block_unblock_all_queues(false);
4650                 }
4651                 return res;
4652         }
4653         return -EINVAL;
4654 }
4655 static DRIVER_ATTR_RW(delay);
4656
4657 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4658 {
4659         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4660 }
4661 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4662 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4663 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4664                             size_t count)
4665 {
4666         int ndelay, res;
4667
4668         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4669             (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4670                 res = count;
4671                 if (sdebug_ndelay != ndelay) {
4672                         int j, k;
4673                         struct sdebug_queue *sqp;
4674
4675                         block_unblock_all_queues(true);
4676                         for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4677                              ++j, ++sqp) {
4678                                 k = find_first_bit(sqp->in_use_bm,
4679                                                    sdebug_max_queue);
4680                                 if (k != sdebug_max_queue) {
4681                                         res = -EBUSY;   /* queued commands */
4682                                         break;
4683                                 }
4684                         }
4685                         if (res > 0) {
4686                                 sdebug_ndelay = ndelay;
4687                                 sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4688                                                         : DEF_JDELAY;
4689                         }
4690                         block_unblock_all_queues(false);
4691                 }
4692                 return res;
4693         }
4694         return -EINVAL;
4695 }
4696 static DRIVER_ATTR_RW(ndelay);
4697
4698 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4699 {
4700         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4701 }
4702
4703 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4704                           size_t count)
4705 {
4706         int opts;
4707         char work[20];
4708
4709         if (sscanf(buf, "%10s", work) == 1) {
4710                 if (strncasecmp(work, "0x", 2) == 0) {
4711                         if (kstrtoint(work + 2, 16, &opts) == 0)
4712                                 goto opts_done;
4713                 } else {
4714                         if (kstrtoint(work, 10, &opts) == 0)
4715                                 goto opts_done;
4716                 }
4717         }
4718         return -EINVAL;
4719 opts_done:
4720         sdebug_opts = opts;
4721         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4722         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4723         tweak_cmnd_count();
4724         return count;
4725 }
4726 static DRIVER_ATTR_RW(opts);
4727
4728 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4729 {
4730         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4731 }
4732 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4733                            size_t count)
4734 {
4735         int n;
4736
4737         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4738                 sdebug_ptype = n;
4739                 return count;
4740         }
4741         return -EINVAL;
4742 }
4743 static DRIVER_ATTR_RW(ptype);
4744
4745 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4746 {
4747         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4748 }
4749 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4750                             size_t count)
4751 {
4752         int n;
4753
4754         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4755                 sdebug_dsense = n;
4756                 return count;
4757         }
4758         return -EINVAL;
4759 }
4760 static DRIVER_ATTR_RW(dsense);
4761
4762 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4763 {
4764         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4765 }
4766 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4767                              size_t count)
4768 {
4769         int n;
4770
4771         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4772                 n = (n > 0);
4773                 sdebug_fake_rw = (sdebug_fake_rw > 0);
4774                 if (sdebug_fake_rw != n) {
4775                         if ((0 == n) && (NULL == fake_storep)) {
4776                                 unsigned long sz =
4777                                         (unsigned long)sdebug_dev_size_mb *
4778                                         1048576;
4779
4780                                 fake_storep = vzalloc(sz);
4781                                 if (NULL == fake_storep) {
4782                                         pr_err("out of memory, 9\n");
4783                                         return -ENOMEM;
4784                                 }
4785                         }
4786                         sdebug_fake_rw = n;
4787                 }
4788                 return count;
4789         }
4790         return -EINVAL;
4791 }
4792 static DRIVER_ATTR_RW(fake_rw);
4793
4794 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4795 {
4796         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4797 }
4798 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4799                               size_t count)
4800 {
4801         int n;
4802
4803         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4804                 sdebug_no_lun_0 = n;
4805                 return count;
4806         }
4807         return -EINVAL;
4808 }
4809 static DRIVER_ATTR_RW(no_lun_0);
4810
4811 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4812 {
4813         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4814 }
4815 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4816                               size_t count)
4817 {
4818         int n;
4819
4820         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4821                 sdebug_num_tgts = n;
4822                 sdebug_max_tgts_luns();
4823                 return count;
4824         }
4825         return -EINVAL;
4826 }
4827 static DRIVER_ATTR_RW(num_tgts);
4828
4829 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4830 {
4831         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4832 }
4833 static DRIVER_ATTR_RO(dev_size_mb);
4834
4835 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4836 {
4837         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4838 }
4839 static DRIVER_ATTR_RO(num_parts);
4840
4841 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4842 {
4843         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4844 }
4845 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4846                                size_t count)
4847 {
4848         int nth;
4849
4850         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4851                 sdebug_every_nth = nth;
4852                 if (nth && !sdebug_statistics) {
4853                         pr_info("every_nth needs statistics=1, set it\n");
4854                         sdebug_statistics = true;
4855                 }
4856                 tweak_cmnd_count();
4857                 return count;
4858         }
4859         return -EINVAL;
4860 }
4861 static DRIVER_ATTR_RW(every_nth);
4862
4863 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4864 {
4865         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4866 }
4867 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4868                               size_t count)
4869 {
4870         int n;
4871         bool changed;
4872
4873         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4874                 if (n > 256) {
4875                         pr_warn("max_luns can be no more than 256\n");
4876                         return -EINVAL;
4877                 }
4878                 changed = (sdebug_max_luns != n);
4879                 sdebug_max_luns = n;
4880                 sdebug_max_tgts_luns();
4881                 if (changed && (sdebug_scsi_level >= 5)) {      /* >= SPC-3 */
4882                         struct sdebug_host_info *sdhp;
4883                         struct sdebug_dev_info *dp;
4884
4885                         spin_lock(&sdebug_host_list_lock);
4886                         list_for_each_entry(sdhp, &sdebug_host_list,
4887                                             host_list) {
4888                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4889                                                     dev_list) {
4890                                         set_bit(SDEBUG_UA_LUNS_CHANGED,
4891                                                 dp->uas_bm);
4892                                 }
4893                         }
4894                         spin_unlock(&sdebug_host_list_lock);
4895                 }
4896                 return count;
4897         }
4898         return -EINVAL;
4899 }
4900 static DRIVER_ATTR_RW(max_luns);
4901
4902 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4903 {
4904         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4905 }
4906 /* N.B. max_queue can be changed while there are queued commands. In-flight
4907  * commands beyond the new max_queue will be completed. */
4908 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4909                                size_t count)
4910 {
4911         int j, n, k, a;
4912         struct sdebug_queue *sqp;
4913
4914         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4915             (n <= SDEBUG_CANQUEUE)) {
4916                 block_unblock_all_queues(true);
4917                 k = 0;
4918                 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4919                      ++j, ++sqp) {
4920                         a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
4921                         if (a > k)
4922                                 k = a;
4923                 }
4924                 sdebug_max_queue = n;
4925                 if (k == SDEBUG_CANQUEUE)
4926                         atomic_set(&retired_max_queue, 0);
4927                 else if (k >= n)
4928                         atomic_set(&retired_max_queue, k + 1);
4929                 else
4930                         atomic_set(&retired_max_queue, 0);
4931                 block_unblock_all_queues(false);
4932                 return count;
4933         }
4934         return -EINVAL;
4935 }
4936 static DRIVER_ATTR_RW(max_queue);
4937
4938 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4939 {
4940         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4941 }
4942 static DRIVER_ATTR_RO(no_uld);
4943
4944 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4945 {
4946         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4947 }
4948 static DRIVER_ATTR_RO(scsi_level);
4949
4950 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4951 {
4952         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4953 }
4954 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4955                                 size_t count)
4956 {
4957         int n;
4958         bool changed;
4959
4960         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4961                 changed = (sdebug_virtual_gb != n);
4962                 sdebug_virtual_gb = n;
4963                 sdebug_capacity = get_sdebug_capacity();
4964                 if (changed) {
4965                         struct sdebug_host_info *sdhp;
4966                         struct sdebug_dev_info *dp;
4967
4968                         spin_lock(&sdebug_host_list_lock);
4969                         list_for_each_entry(sdhp, &sdebug_host_list,
4970                                             host_list) {
4971                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4972                                                     dev_list) {
4973                                         set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4974                                                 dp->uas_bm);
4975                                 }
4976                         }
4977                         spin_unlock(&sdebug_host_list_lock);
4978                 }
4979                 return count;
4980         }
4981         return -EINVAL;
4982 }
4983 static DRIVER_ATTR_RW(virtual_gb);
4984
4985 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4986 {
4987         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4988 }
4989
4990 static int sdebug_add_adapter(void);
4991 static void sdebug_remove_adapter(void);
4992
4993 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4994                               size_t count)
4995 {
4996         int delta_hosts;
4997
4998         if (sscanf(buf, "%d", &delta_hosts) != 1)
4999                 return -EINVAL;
5000         if (delta_hosts > 0) {
5001                 do {
5002                         sdebug_add_adapter();
5003                 } while (--delta_hosts);
5004         } else if (delta_hosts < 0) {
5005                 do {
5006                         sdebug_remove_adapter();
5007                 } while (++delta_hosts);
5008         }
5009         return count;
5010 }
5011 static DRIVER_ATTR_RW(add_host);
5012
5013 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5014 {
5015         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5016 }
5017 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5018                                     size_t count)
5019 {
5020         int n;
5021
5022         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5023                 sdebug_vpd_use_hostno = n;
5024                 return count;
5025         }
5026         return -EINVAL;
5027 }
5028 static DRIVER_ATTR_RW(vpd_use_hostno);
5029
5030 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5031 {
5032         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5033 }
5034 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5035                                 size_t count)
5036 {
5037         int n;
5038
5039         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5040                 if (n > 0)
5041                         sdebug_statistics = true;
5042                 else {
5043                         clear_queue_stats();
5044                         sdebug_statistics = false;
5045                 }
5046                 return count;
5047         }
5048         return -EINVAL;
5049 }
5050 static DRIVER_ATTR_RW(statistics);
5051
5052 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5053 {
5054         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5055 }
5056 static DRIVER_ATTR_RO(sector_size);
5057
5058 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5059 {
5060         return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5061 }
5062 static DRIVER_ATTR_RO(submit_queues);
5063
5064 static ssize_t dix_show(struct device_driver *ddp, char *buf)
5065 {
5066         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5067 }
5068 static DRIVER_ATTR_RO(dix);
5069
5070 static ssize_t dif_show(struct device_driver *ddp, char *buf)
5071 {
5072         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5073 }
5074 static DRIVER_ATTR_RO(dif);
5075
5076 static ssize_t guard_show(struct device_driver *ddp, char *buf)
5077 {
5078         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5079 }
5080 static DRIVER_ATTR_RO(guard);
5081
5082 static ssize_t ato_show(struct device_driver *ddp, char *buf)
5083 {
5084         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5085 }
5086 static DRIVER_ATTR_RO(ato);
5087
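/*
 * Shows the thin provisioning map as a bit-list (one bit per provisioning
 * block); when LBP is not enabled the whole store is reported as mapped.
 */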
5088 static ssize_t map_show(struct device_driver *ddp, char *buf)
5089 {
5090         ssize_t count;
5091
5092         if (!scsi_debug_lbp())
5093                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
5094                                  sdebug_store_sectors);
5095
5096         count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
5097                           (int)map_size, map_storep);
5098         buf[count++] = '\n';
5099         buf[count] = '\0';
5100
5101         return count;
5102 }
5103 static DRIVER_ATTR_RO(map);
5104
5105 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5106 {
5107         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5108 }
5109 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5110                                size_t count)
5111 {
5112         int n;
5113
5114         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5115                 sdebug_removable = (n > 0);
5116                 return count;
5117         }
5118         return -EINVAL;
5119 }
5120 static DRIVER_ATTR_RW(removable);
5121
5122 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5123 {
5124         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5125 }
5126 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5127 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5128                                size_t count)
5129 {
5130         int n;
5131
5132         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5133                 sdebug_host_lock = (n > 0);
5134                 return count;
5135         }
5136         return -EINVAL;
5137 }
5138 static DRIVER_ATTR_RW(host_lock);
5139
5140 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5141 {
5142         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5143 }
5144 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5145                             size_t count)
5146 {
5147         int n;
5148
5149         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5150                 sdebug_strict = (n > 0);
5151                 return count;
5152         }
5153         return -EINVAL;
5154 }
5155 static DRIVER_ATTR_RW(strict);
5156
5157 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5158 {
5159         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5160 }
5161 static DRIVER_ATTR_RO(uuid_ctl);
5162
5163 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5164 {
5165         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5166 }
5167 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5168                              size_t count)
5169 {
5170         int ret, n;
5171
5172         ret = kstrtoint(buf, 0, &n);
5173         if (ret)
5174                 return ret;
5175         sdebug_cdb_len = n;
5176         all_config_cdb_len();
5177         return count;
5178 }
5179 static DRIVER_ATTR_RW(cdb_len);
5180
5181
5182 /* Note: The following array creates attribute files in the
5183    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5184    files (over those found in the /sys/module/scsi_debug/parameters
5185    directory) is that auxiliary actions can be triggered when an attribute
5186    is changed. For example see: add_host_store() above.
5187  */
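/*
 * Illustrative usage from user space (assumes the module is loaded; the
 * attribute path is the directory named in the comment above):
 *   echo 1  > /sys/bus/pseudo/drivers/scsi_debug/add_host    adds one host
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host    removes one host
 */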
5188
5189 static struct attribute *sdebug_drv_attrs[] = {
5190         &driver_attr_delay.attr,
5191         &driver_attr_opts.attr,
5192         &driver_attr_ptype.attr,
5193         &driver_attr_dsense.attr,
5194         &driver_attr_fake_rw.attr,
5195         &driver_attr_no_lun_0.attr,
5196         &driver_attr_num_tgts.attr,
5197         &driver_attr_dev_size_mb.attr,
5198         &driver_attr_num_parts.attr,
5199         &driver_attr_every_nth.attr,
5200         &driver_attr_max_luns.attr,
5201         &driver_attr_max_queue.attr,
5202         &driver_attr_no_uld.attr,
5203         &driver_attr_scsi_level.attr,
5204         &driver_attr_virtual_gb.attr,
5205         &driver_attr_add_host.attr,
5206         &driver_attr_vpd_use_hostno.attr,
5207         &driver_attr_sector_size.attr,
5208         &driver_attr_statistics.attr,
5209         &driver_attr_submit_queues.attr,
5210         &driver_attr_dix.attr,
5211         &driver_attr_dif.attr,
5212         &driver_attr_guard.attr,
5213         &driver_attr_ato.attr,
5214         &driver_attr_map.attr,
5215         &driver_attr_removable.attr,
5216         &driver_attr_host_lock.attr,
5217         &driver_attr_ndelay.attr,
5218         &driver_attr_strict.attr,
5219         &driver_attr_uuid_ctl.attr,
5220         &driver_attr_cdb_len.attr,
5221         NULL,
5222 };
5223 ATTRIBUTE_GROUPS(sdebug_drv);
5224
5225 static struct device *pseudo_primary;
5226
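/*
 * Module initialization: validate module parameters, allocate the per-queue
 * array and (unless fake_rw is set) the ramdisk backing store, set up
 * optional DIF and provisioning metadata, register the pseudo bus and
 * driver, then add the initially requested number of simulated hosts.
 */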
5227 static int __init scsi_debug_init(void)
5228 {
5229         unsigned long sz;
5230         int host_to_add;
5231         int k;
5232         int ret;
5233
5234         atomic_set(&retired_max_queue, 0);
5235
5236         if (sdebug_ndelay >= 1000 * 1000 * 1000) {
5237                 pr_warn("ndelay must be less than 1 second, ignored\n");
5238                 sdebug_ndelay = 0;
5239         } else if (sdebug_ndelay > 0)
5240                 sdebug_jdelay = JDELAY_OVERRIDDEN;
5241
5242         switch (sdebug_sector_size) {
5243         case  512:
5244         case 1024:
5245         case 2048:
5246         case 4096:
5247                 break;
5248         default:
5249                 pr_err("invalid sector_size %d\n", sdebug_sector_size);
5250                 return -EINVAL;
5251         }
5252
5253         switch (sdebug_dif) {
5254         case T10_PI_TYPE0_PROTECTION:
5255                 break;
5256         case T10_PI_TYPE1_PROTECTION:
5257         case T10_PI_TYPE2_PROTECTION:
5258         case T10_PI_TYPE3_PROTECTION:
5259                 have_dif_prot = true;
5260                 break;
5261
5262         default:
5263                 pr_err("dif must be 0, 1, 2 or 3\n");
5264                 return -EINVAL;
5265         }
5266
5267         if (sdebug_num_tgts < 0) {
5268                 pr_err("num_tgts must be >= 0\n");
5269                 return -EINVAL;
5270         }
5271
5272         if (sdebug_guard > 1) {
5273                 pr_err("guard must be 0 or 1\n");
5274                 return -EINVAL;
5275         }
5276
5277         if (sdebug_ato > 1) {
5278                 pr_err("ato must be 0 or 1\n");
5279                 return -EINVAL;
5280         }
5281
5282         if (sdebug_physblk_exp > 15) {
5283                 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
5284                 return -EINVAL;
5285         }
5286         if (sdebug_max_luns > 256) {
5287                 pr_warn("max_luns can be no more than 256, using default\n");
5288                 sdebug_max_luns = DEF_MAX_LUNS;
5289         }
5290
5291         if (sdebug_lowest_aligned > 0x3fff) {
5292                 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
5293                 return -EINVAL;
5294         }
5295
5296         if (submit_queues < 1) {
5297                 pr_err("submit_queues must be 1 or more\n");
5298                 return -EINVAL;
5299         }
5300         sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
5301                                GFP_KERNEL);
5302         if (sdebug_q_arr == NULL)
5303                 return -ENOMEM;
5304         for (k = 0; k < submit_queues; ++k)
5305                 spin_lock_init(&sdebug_q_arr[k].qc_lock);
5306
5307         if (sdebug_dev_size_mb < 1)
5308                 sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
5309         sz = (unsigned long)sdebug_dev_size_mb * 1048576;
5310         sdebug_store_sectors = sz / sdebug_sector_size;
5311         sdebug_capacity = get_sdebug_capacity();
5312
5313         /* play around with geometry, don't waste too much on track 0 */
5314         sdebug_heads = 8;
5315         sdebug_sectors_per = 32;
5316         if (sdebug_dev_size_mb >= 256)
5317                 sdebug_heads = 64;
5318         else if (sdebug_dev_size_mb >= 16)
5319                 sdebug_heads = 32;
5320         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5321                                (sdebug_sectors_per * sdebug_heads);
5322         if (sdebug_cylinders_per >= 1024) {
5323                 /* other LLDs do this; implies >= 1GB ram disk ... */
5324                 sdebug_heads = 255;
5325                 sdebug_sectors_per = 63;
5326                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5327                                (sdebug_sectors_per * sdebug_heads);
5328         }
5329
5330         if (sdebug_fake_rw == 0) {
5331                 fake_storep = vzalloc(sz);
5332                 if (NULL == fake_storep) {
5333                         pr_err("out of memory, 1\n");
5334                         ret = -ENOMEM;
5335                         goto free_q_arr;
5336                 }
5337                 if (sdebug_num_parts > 0)
5338                         sdebug_build_parts(fake_storep, sz);
5339         }
5340
5341         if (sdebug_dix) {
5342                 int dif_size;
5343
5344                 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
5345                 dif_storep = vmalloc(dif_size);
5346
5347                 pr_info("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
5348
5349                 if (dif_storep == NULL) {
5350                         pr_err("out of mem. (DIX)\n");
5351                         ret = -ENOMEM;
5352                         goto free_vm;
5353                 }
5354
5355                 memset(dif_storep, 0xff, dif_size);
5356         }
5357
5358         /* Logical Block Provisioning */
5359         if (scsi_debug_lbp()) {
5360                 sdebug_unmap_max_blocks =
5361                         clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5362
5363                 sdebug_unmap_max_desc =
5364                         clamp(sdebug_unmap_max_desc, 0U, 256U);
5365
5366                 sdebug_unmap_granularity =
5367                         clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5368
5369                 if (sdebug_unmap_alignment &&
5370                     sdebug_unmap_granularity <=
5371                     sdebug_unmap_alignment) {
5372                         pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5373                         ret = -EINVAL;
5374                         goto free_vm;
5375                 }
5376
5377                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5378                 map_storep = vmalloc(array_size(sizeof(long),
5379                                                 BITS_TO_LONGS(map_size)));
5380
5381                 pr_info("%lu provisioning blocks\n", map_size);
5382
5383                 if (map_storep == NULL) {
5384                         pr_err("out of mem. (MAP)\n");
5385                         ret = -ENOMEM;
5386                         goto free_vm;
5387                 }
5388
5389                 bitmap_zero(map_storep, map_size);
5390
5391                 /* Map first 1KB for partition table */
5392                 if (sdebug_num_parts)
5393                         map_region(0, 2);
5394         }
5395
5396         pseudo_primary = root_device_register("pseudo_0");
5397         if (IS_ERR(pseudo_primary)) {
5398                 pr_warn("root_device_register() error\n");
5399                 ret = PTR_ERR(pseudo_primary);
5400                 goto free_vm;
5401         }
5402         ret = bus_register(&pseudo_lld_bus);
5403         if (ret < 0) {
5404                 pr_warn("bus_register error: %d\n", ret);
5405                 goto dev_unreg;
5406         }
5407         ret = driver_register(&sdebug_driverfs_driver);
5408         if (ret < 0) {
5409                 pr_warn("driver_register error: %d\n", ret);
5410                 goto bus_unreg;
5411         }
5412
5413         host_to_add = sdebug_add_host;
5414         sdebug_add_host = 0;
5415
5416         for (k = 0; k < host_to_add; k++) {
5417                 if (sdebug_add_adapter()) {
5418                         pr_err("sdebug_add_adapter failed k=%d\n", k);
5419                         break;
5420                 }
5421         }
5422
5423         if (sdebug_verbose)
5424                 pr_info("built %d host(s)\n", sdebug_add_host);
5425
5426         return 0;
5427
5428 bus_unreg:
5429         bus_unregister(&pseudo_lld_bus);
5430 dev_unreg:
5431         root_device_unregister(pseudo_primary);
5432 free_vm:
5433         vfree(map_storep);
5434         vfree(dif_storep);
5435         vfree(fake_storep);
5436 free_q_arr:
5437         kfree(sdebug_q_arr);
5438         return ret;
5439 }
5440
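/*
 * Module cleanup: stop any queued commands, remove all simulated hosts,
 * unregister the driver and pseudo bus, and free the backing stores
 * allocated at init time.
 */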
5441 static void __exit scsi_debug_exit(void)
5442 {
5443         int k = sdebug_add_host;
5444
5445         stop_all_queued();
5446         for (; k; k--)
5447                 sdebug_remove_adapter();
5448         free_all_queued();
5449         driver_unregister(&sdebug_driverfs_driver);
5450         bus_unregister(&pseudo_lld_bus);
5451         root_device_unregister(pseudo_primary);
5452
5453         vfree(map_storep);
5454         vfree(dif_storep);
5455         vfree(fake_storep);
5456         kfree(sdebug_q_arr);
5457 }
5458
5459 device_initcall(scsi_debug_init);
5460 module_exit(scsi_debug_exit);
5461
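/* Release callback for a simulated adapter's device; frees its host info. */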
5462 static void sdebug_release_adapter(struct device *dev)
5463 {
5464         struct sdebug_host_info *sdbg_host;
5465
5466         sdbg_host = to_sdebug_host(dev);
5467         kfree(sdbg_host);
5468 }
5469
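/*
 * Allocate one simulated adapter: create num_tgts * max_luns device info
 * entries, link the adapter onto sdebug_host_list and register it on the
 * pseudo bus, which triggers sdebug_driver_probe() to add the SCSI host.
 */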
5470 static int sdebug_add_adapter(void)
5471 {
5472         int k, devs_per_host;
5473         int error = 0;
5474         struct sdebug_host_info *sdbg_host;
5475         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5476
5477         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5478         if (sdbg_host == NULL) {
5479                 pr_err("out of memory at line %d\n", __LINE__);
5480                 return -ENOMEM;
5481         }
5482
5483         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5484
5485         devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5486         for (k = 0; k < devs_per_host; k++) {
5487                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5488                 if (!sdbg_devinfo) {
5489                         pr_err("out of memory at line %d\n", __LINE__);
5490                         error = -ENOMEM;
5491                         goto clean;
5492                 }
5493         }
5494
5495         spin_lock(&sdebug_host_list_lock);
5496         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5497         spin_unlock(&sdebug_host_list_lock);
5498
5499         sdbg_host->dev.bus = &pseudo_lld_bus;
5500         sdbg_host->dev.parent = pseudo_primary;
5501         sdbg_host->dev.release = &sdebug_release_adapter;
5502         dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5503
5504         error = device_register(&sdbg_host->dev);
5505
5506         if (error)
5507                 goto clean;
5508
5509         ++sdebug_add_host;
5510         return error;
5511
5512 clean:
5513         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5514                                  dev_list) {
5515                 list_del(&sdbg_devinfo->dev_list);
5516                 kfree(sdbg_devinfo);
5517         }
5518
5519         kfree(sdbg_host);
5520         return error;
5521 }
5522
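/* Remove the most recently added simulated adapter, if any. */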
5523 static void sdebug_remove_adapter(void)
5524 {
5525         struct sdebug_host_info *sdbg_host = NULL;
5526
5527         spin_lock(&sdebug_host_list_lock);
5528         if (!list_empty(&sdebug_host_list)) {
5529                 sdbg_host = list_entry(sdebug_host_list.prev,
5530                                        struct sdebug_host_info, host_list);
5531                 list_del(&sdbg_host->host_list);
5532         }
5533         spin_unlock(&sdebug_host_list_lock);
5534
5535         if (!sdbg_host)
5536                 return;
5537
5538         device_unregister(&sdbg_host->dev);
5539         --sdebug_add_host;
5540 }
5541
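/*
 * ->change_queue_depth handler: clamp the requested depth to
 * [1, SDEBUG_CANQUEUE + 10] and apply it via scsi_change_queue_depth().
 */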
5542 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5543 {
5544         int num_in_q = 0;
5545         struct sdebug_dev_info *devip;
5546
5547         block_unblock_all_queues(true);
5548         devip = (struct sdebug_dev_info *)sdev->hostdata;
5549         if (NULL == devip) {
5550                 block_unblock_all_queues(false);
5551                 return  -ENODEV;
5552         }
5553         num_in_q = atomic_read(&devip->num_in_q);
5554
5555         if (qdepth < 1)
5556                 qdepth = 1;
5557         /* allow to exceed max host qc_arr elements for testing */
5558         if (qdepth > SDEBUG_CANQUEUE + 10)
5559                 qdepth = SDEBUG_CANQUEUE + 10;
5560         scsi_change_queue_depth(sdev, qdepth);
5561
5562         if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5563                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5564                             __func__, qdepth, num_in_q);
5565         }
5566         block_unblock_all_queues(false);
5567         return sdev->queue_depth;
5568 }
5569
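/*
 * Called only when sdebug_every_nth is non-zero; returns true when this
 * command should be dropped without a response to simulate a timeout.
 */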
5570 static bool fake_timeout(struct scsi_cmnd *scp)
5571 {
5572         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5573                 if (sdebug_every_nth < -1)
5574                         sdebug_every_nth = -1;
5575                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5576                         return true; /* ignore command causing timeout */
5577                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5578                          scsi_medium_access_command(scp))
5579                         return true; /* time out reads and writes */
5580         }
5581         return false;
5582 }
5583
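/*
 * Returns true when the SDEBUG_OPT_HOST_BUSY option is set and the command
 * count has reached a multiple of every_nth; the caller then returns
 * SCSI_MLQUEUE_HOST_BUSY. The every_nth test avoids a modulo by zero.
 */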
5584 static bool fake_host_busy(struct scsi_cmnd *scp)
5585 {
5586         return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) && sdebug_every_nth &&
5587                 (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5588 }
5589
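/*
 * Main queuecommand entry point: look up the opcode (and any service
 * action) in opcode_info_arr, apply unit attention, strict CDB and
 * ready checks, then schedule the matching resp_* handler with the
 * configured command delay.
 */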
5590 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5591                                    struct scsi_cmnd *scp)
5592 {
5593         u8 sdeb_i;
5594         struct scsi_device *sdp = scp->device;
5595         const struct opcode_info_t *oip;
5596         const struct opcode_info_t *r_oip;
5597         struct sdebug_dev_info *devip;
5598         u8 *cmd = scp->cmnd;
5599         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5600         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
5601         int k, na;
5602         int errsts = 0;
5603         u32 flags;
5604         u16 sa;
5605         u8 opcode = cmd[0];
5606         bool has_wlun_rl;
5607
5608         scsi_set_resid(scp, 0);
5609         if (sdebug_statistics)
5610                 atomic_inc(&sdebug_cmnd_count);
5611         if (unlikely(sdebug_verbose &&
5612                      !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
5613                 char b[120];
5614                 int n, len, sb;
5615
5616                 len = scp->cmd_len;
5617                 sb = (int)sizeof(b);
5618                 if (len > 32)
5619                         strcpy(b, "too long, over 32 bytes");
5620                 else {
5621                         for (k = 0, n = 0; k < len && n < sb; ++k)
5622                                 n += scnprintf(b + n, sb - n, "%02x ",
5623                                                (u32)cmd[k]);
5624                 }
5625                 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
5626                             blk_mq_unique_tag(scp->request), b);
5627         }
5628         if (fake_host_busy(scp))
5629                 return SCSI_MLQUEUE_HOST_BUSY;
5630         has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5631         if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
5632                 goto err_out;
5633
5634         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
5635         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
5636         devip = (struct sdebug_dev_info *)sdp->hostdata;
5637         if (unlikely(!devip)) {
5638                 devip = find_build_dev_info(sdp);
5639                 if (NULL == devip)
5640                         goto err_out;
5641         }
5642         na = oip->num_attached;
5643         r_pfp = oip->pfp;
5644         if (na) {       /* multiple commands with this opcode */
5645                 r_oip = oip;
5646                 if (FF_SA & r_oip->flags) {
5647                         if (F_SA_LOW & oip->flags)
5648                                 sa = 0x1f & cmd[1];
5649                         else
5650                                 sa = get_unaligned_be16(cmd + 8);
5651                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5652                                 if (opcode == oip->opcode && sa == oip->sa)
5653                                         break;
5654                         }
5655                 } else {   /* since no service action only check opcode */
5656                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5657                                 if (opcode == oip->opcode)
5658                                         break;
5659                         }
5660                 }
5661                 if (k > na) {
5662                         if (F_SA_LOW & r_oip->flags)
5663                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5664                         else if (F_SA_HIGH & r_oip->flags)
5665                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5666                         else
5667                                 mk_sense_invalid_opcode(scp);
5668                         goto check_cond;
5669                 }
5670         }       /* else (when na==0) we assume the oip is a match */
5671         flags = oip->flags;
5672         if (unlikely(F_INV_OP & flags)) {
5673                 mk_sense_invalid_opcode(scp);
5674                 goto check_cond;
5675         }
5676         if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
5677                 if (sdebug_verbose)
5678                         sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
5679                                     my_name, opcode, " supported for wlun");
5680                 mk_sense_invalid_opcode(scp);
5681                 goto check_cond;
5682         }
5683         if (unlikely(sdebug_strict)) {  /* check cdb against mask */
5684                 u8 rem;
5685                 int j;
5686
5687                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5688                         rem = ~oip->len_mask[k] & cmd[k];
5689                         if (rem) {
5690                                 for (j = 7; j >= 0; --j, rem <<= 1) {
5691                                         if (0x80 & rem)
5692                                                 break;
5693                                 }
5694                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5695                                 goto check_cond;
5696                         }
5697                 }
5698         }
5699         if (unlikely(!(F_SKIP_UA & flags) &&
5700                      find_first_bit(devip->uas_bm,
5701                                     SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
5702                 errsts = make_ua(scp, devip);
5703                 if (errsts)
5704                         goto check_cond;
5705         }
5706         if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
5707                 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5708                 if (sdebug_verbose)
5709                         sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5710                                     "%s\n", my_name, "initializing command "
5711                                     "required");
5712                 errsts = check_condition_result;
5713                 goto fini;
5714         }
5715         if (sdebug_fake_rw && (F_FAKE_RW & flags))
5716                 goto fini;
5717         if (unlikely(sdebug_every_nth)) {
5718                 if (fake_timeout(scp))
5719                         return 0;       /* ignore command: make trouble */
5720         }
5721         if (likely(oip->pfp))
5722                 pfp = oip->pfp; /* calls a resp_* function */
5723         else
5724                 pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
5725
5726 fini:
5727         if (F_DELAY_OVERR & flags)
5728                 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
5729         else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
5730                                             sdebug_ndelay > 10000)) {
5731                 /*
5732                  * Skip long delays if ndelay <= 10 microseconds. Otherwise
5733                  * for Start Stop Unit (SSU) want at least 1 second delay and
5734                  * if sdebug_jdelay>1 want a long delay of that many seconds.
5735                  * For Synchronize Cache want 1/20 of SSU's delay.
5736                  */
5737                 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
5738                 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
5739
5740                 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
5741                 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
5742         } else
5743                 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
5744                                      sdebug_ndelay);
5745 check_cond:
5746         return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
5747 err_out:
5748         return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
5749 }
5750
5751 static struct scsi_host_template sdebug_driver_template = {
5752         .show_info =            scsi_debug_show_info,
5753         .write_info =           scsi_debug_write_info,
5754         .proc_name =            sdebug_proc_name,
5755         .name =                 "SCSI DEBUG",
5756         .info =                 scsi_debug_info,
5757         .slave_alloc =          scsi_debug_slave_alloc,
5758         .slave_configure =      scsi_debug_slave_configure,
5759         .slave_destroy =        scsi_debug_slave_destroy,
5760         .ioctl =                scsi_debug_ioctl,
5761         .queuecommand =         scsi_debug_queuecommand,
5762         .change_queue_depth =   sdebug_change_qdepth,
5763         .eh_abort_handler =     scsi_debug_abort,
5764         .eh_device_reset_handler = scsi_debug_device_reset,
5765         .eh_target_reset_handler = scsi_debug_target_reset,
5766         .eh_bus_reset_handler = scsi_debug_bus_reset,
5767         .eh_host_reset_handler = scsi_debug_host_reset,
5768         .can_queue =            SDEBUG_CANQUEUE,
5769         .this_id =              7,
5770         .sg_tablesize =         SG_MAX_SEGMENTS,
5771         .cmd_per_lun =          DEF_CMD_PER_LUN,
5772         .max_sectors =          -1U,
5773         .max_segment_size =     -1U,
5774         .module =               THIS_MODULE,
5775         .track_queue_depth =    1,
5776 };
5777
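/*
 * Bus probe callback for a simulated adapter: allocate a Scsi_Host,
 * configure queue count, protection (DIF/DIX) and guard settings, then
 * add and scan the host.
 */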
5778 static int sdebug_driver_probe(struct device *dev)
5779 {
5780         int error = 0;
5781         struct sdebug_host_info *sdbg_host;
5782         struct Scsi_Host *hpnt;
5783         int hprot;
5784
5785         sdbg_host = to_sdebug_host(dev);
5786
5787         sdebug_driver_template.can_queue = sdebug_max_queue;
5788         if (!sdebug_clustering)
5789                 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
5790
5791         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5792         if (NULL == hpnt) {
5793                 pr_err("scsi_host_alloc failed\n");
5794                 error = -ENODEV;
5795                 return error;
5796         }
5797         if (submit_queues > nr_cpu_ids) {
5798                 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5799                         my_name, submit_queues, nr_cpu_ids);
5800                 submit_queues = nr_cpu_ids;
5801         }
5802         /* Tell the SCSI midlayer how many blk-mq hardware queues to use */
5803         /* Following should give the same answer for each host */
5804         hpnt->nr_hw_queues = submit_queues;
5805
5806         sdbg_host->shost = hpnt;
5807         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5808         if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5809                 hpnt->max_id = sdebug_num_tgts + 1;
5810         else
5811                 hpnt->max_id = sdebug_num_tgts;
5812         /* = sdebug_max_luns; */
5813         hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5814
5815         hprot = 0;
5816
5817         switch (sdebug_dif) {
5818
5819         case T10_PI_TYPE1_PROTECTION:
5820                 hprot = SHOST_DIF_TYPE1_PROTECTION;
5821                 if (sdebug_dix)
5822                         hprot |= SHOST_DIX_TYPE1_PROTECTION;
5823                 break;
5824
5825         case T10_PI_TYPE2_PROTECTION:
5826                 hprot = SHOST_DIF_TYPE2_PROTECTION;
5827                 if (sdebug_dix)
5828                         hprot |= SHOST_DIX_TYPE2_PROTECTION;
5829                 break;
5830
5831         case T10_PI_TYPE3_PROTECTION:
5832                 hprot = SHOST_DIF_TYPE3_PROTECTION;
5833                 if (sdebug_dix)
5834                         hprot |= SHOST_DIX_TYPE3_PROTECTION;
5835                 break;
5836
5837         default:
5838                 if (sdebug_dix)
5839                         hprot |= SHOST_DIX_TYPE0_PROTECTION;
5840                 break;
5841         }
5842
5843         scsi_host_set_prot(hpnt, hprot);
5844
5845         if (have_dif_prot || sdebug_dix)
5846                 pr_info("host protection%s%s%s%s%s%s%s\n",
5847                         (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5848                         (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5849                         (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5850                         (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5851                         (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5852                         (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5853                         (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5854
5855         if (sdebug_guard == 1)
5856                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5857         else
5858                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5859
5860         sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5861         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5862         if (sdebug_every_nth)   /* need stats counters for every_nth */
5863                 sdebug_statistics = true;
5864         error = scsi_add_host(hpnt, &sdbg_host->dev);
5865         if (error) {
5866                 pr_err("scsi_add_host failed\n");
5867                 error = -ENODEV;
5868                 scsi_host_put(hpnt);
5869         } else
5870                 scsi_scan_host(hpnt);
5871
5872         return error;
5873 }
5874
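/*
 * Bus remove callback: remove the Scsi_Host and free all device info
 * entries belonging to this simulated adapter.
 */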
5875 static int sdebug_driver_remove(struct device *dev)
5876 {
5877         struct sdebug_host_info *sdbg_host;
5878         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5879
5880         sdbg_host = to_sdebug_host(dev);
5881
5882         if (!sdbg_host) {
5883                 pr_err("Unable to locate host info\n");
5884                 return -ENODEV;
5885         }
5886
5887         scsi_remove_host(sdbg_host->shost);
5888
5889         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5890                                  dev_list) {
5891                 list_del(&sdbg_devinfo->dev_list);
5892                 kfree(sdbg_devinfo);
5893         }
5894
5895         scsi_host_put(sdbg_host->shost);
5896         return 0;
5897 }
5898
5899 static int pseudo_lld_bus_match(struct device *dev,
5900                                 struct device_driver *dev_driver)
5901 {
5902         return 1;
5903 }
5904
5905 static struct bus_type pseudo_lld_bus = {
5906         .name = "pseudo",
5907         .match = pseudo_lld_bus_match,
5908         .probe = sdebug_driver_probe,
5909         .remove = sdebug_driver_remove,
5910         .drv_groups = sdebug_drv_groups,
5911 };