scsi: qla2xxx: remove double assignment in qla2x00_update_fcport
[linux-2.6-microblaze.git] / drivers / scsi / scsi_debug.c
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2018 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22
23 #include <linux/module.h>
24
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
46
47 #include <net/checksum.h>
48
49 #include <asm/unaligned.h>
50
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59
60 #include "sd.h"
61 #include "scsi_logging.h"
62
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0188"   /* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20190125";
66
67 #define MY_NAME "scsi_debug"
68
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define WRITE_PROTECTED 0x27
80 #define UA_RESET_ASC 0x29
81 #define UA_CHANGED_ASC 0x2a
82 #define TARGET_CHANGED_ASC 0x3f
83 #define LUNS_CHANGED_ASCQ 0x0e
84 #define INSUFF_RES_ASC 0x55
85 #define INSUFF_RES_ASCQ 0x3
86 #define POWER_ON_RESET_ASCQ 0x0
87 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
88 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
89 #define CAPACITY_CHANGED_ASCQ 0x9
90 #define SAVING_PARAMS_UNSUP 0x39
91 #define TRANSPORT_PROBLEM 0x4b
92 #define THRESHOLD_EXCEEDED 0x5d
93 #define LOW_POWER_COND_ON 0x5e
94 #define MISCOMPARE_VERIFY_ASC 0x1d
95 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
96 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
97 #define WRITE_ERROR_ASC 0xc
98
99 /* Additional Sense Code Qualifier (ASCQ) */
100 #define ACK_NAK_TO 0x3
101
102 /* Default values for driver parameters */
103 #define DEF_NUM_HOST   1
104 #define DEF_NUM_TGTS   1
105 #define DEF_MAX_LUNS   1
106 /* With these defaults, this driver will make 1 host with 1 target
107  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
108  */
109 #define DEF_ATO 1
110 #define DEF_CDB_LEN 10
111 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
112 #define DEF_DEV_SIZE_MB   8
113 #define DEF_DIF 0
114 #define DEF_DIX 0
115 #define DEF_D_SENSE   0
116 #define DEF_EVERY_NTH   0
117 #define DEF_FAKE_RW     0
118 #define DEF_GUARD 0
119 #define DEF_HOST_LOCK 0
120 #define DEF_LBPU 0
121 #define DEF_LBPWS 0
122 #define DEF_LBPWS10 0
123 #define DEF_LBPRZ 1
124 #define DEF_LOWEST_ALIGNED 0
125 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
126 #define DEF_NO_LUN_0   0
127 #define DEF_NUM_PARTS   0
128 #define DEF_OPTS   0
129 #define DEF_OPT_BLKS 1024
130 #define DEF_PHYSBLK_EXP 0
131 #define DEF_OPT_XFERLEN_EXP 0
132 #define DEF_PTYPE   TYPE_DISK
133 #define DEF_REMOVABLE false
134 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
135 #define DEF_SECTOR_SIZE 512
136 #define DEF_UNMAP_ALIGNMENT 0
137 #define DEF_UNMAP_GRANULARITY 1
138 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
139 #define DEF_UNMAP_MAX_DESC 256
140 #define DEF_VIRTUAL_GB   0
141 #define DEF_VPD_USE_HOSTNO 1
142 #define DEF_WRITESAME_LENGTH 0xFFFF
143 #define DEF_STRICT 0
144 #define DEF_STATISTICS false
145 #define DEF_SUBMIT_QUEUES 1
146 #define DEF_UUID_CTL 0
147 #define JDELAY_OVERRIDDEN -9999
148
149 #define SDEBUG_LUN_0_VAL 0
150
151 /* bit mask values for sdebug_opts */
152 #define SDEBUG_OPT_NOISE                1
153 #define SDEBUG_OPT_MEDIUM_ERR           2
154 #define SDEBUG_OPT_TIMEOUT              4
155 #define SDEBUG_OPT_RECOVERED_ERR        8
156 #define SDEBUG_OPT_TRANSPORT_ERR        16
157 #define SDEBUG_OPT_DIF_ERR              32
158 #define SDEBUG_OPT_DIX_ERR              64
159 #define SDEBUG_OPT_MAC_TIMEOUT          128
160 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
161 #define SDEBUG_OPT_Q_NOISE              0x200
162 #define SDEBUG_OPT_ALL_TSF              0x400
163 #define SDEBUG_OPT_RARE_TSF             0x800
164 #define SDEBUG_OPT_N_WCE                0x1000
165 #define SDEBUG_OPT_RESET_NOISE          0x2000
166 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
167 #define SDEBUG_OPT_HOST_BUSY            0x8000
168 #define SDEBUG_OPT_CMD_ABORT            0x10000
169 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
170                               SDEBUG_OPT_RESET_NOISE)
171 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
172                                   SDEBUG_OPT_TRANSPORT_ERR | \
173                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
174                                   SDEBUG_OPT_SHORT_TRANSFER | \
175                                   SDEBUG_OPT_HOST_BUSY | \
176                                   SDEBUG_OPT_CMD_ABORT)
177 /* When "every_nth" > 0 then modulo "every_nth" commands:
178  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
179  *   - a RECOVERED_ERROR is simulated on successful read and write
180  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
181  *   - a TRANSPORT_ERROR is simulated on successful read and write
182  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
183  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
184  *     CMD_ABORT
185  *
186  * When "every_nth" < 0 then after "- every_nth" commands the selected
187  * error will be injected. The error will be injected on every subsequent
188  * command until some other action occurs; for example, the user writing
189  * a new value (other than -1 or 1) to every_nth:
190  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
191  */
192
193 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
194  * priority order. In the subset implemented here lower numbers have higher
195  * priority. The UA numbers should be a sequence starting from 0 with
196  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
197 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
198 #define SDEBUG_UA_BUS_RESET 1
199 #define SDEBUG_UA_MODE_CHANGED 2
200 #define SDEBUG_UA_CAPACITY_CHANGED 3
201 #define SDEBUG_UA_LUNS_CHANGED 4
202 #define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
203 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
204 #define SDEBUG_NUM_UAS 7
205
206 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
207  * sector on read commands: */
208 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
209 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
210
211 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
212  * or "peripheral device" addressing (value 0) */
213 #define SAM2_LUN_ADDRESS_METHOD 0
214
215 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
216  * (for response) per submit queue at one time. Can be reduced by max_queue
217  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
218  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
219  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
220  * but cannot exceed SDEBUG_CANQUEUE .
221  */
222 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
223 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
224 #define DEF_CMD_PER_LUN  255
225
226 #define F_D_IN                  1
227 #define F_D_OUT                 2
228 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
229 #define F_D_UNKN                8
230 #define F_RL_WLUN_OK            0x10
231 #define F_SKIP_UA               0x20
232 #define F_DELAY_OVERR           0x40
233 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
235 #define F_INV_OP                0x200
236 #define F_FAKE_RW               0x400
237 #define F_M_ACCESS              0x800   /* media access */
238 #define F_SSU_DELAY             0x1000
239 #define F_SYNC_DELAY            0x2000
240
241 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
242 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
243 #define FF_SA (F_SA_HIGH | F_SA_LOW)
244 #define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
245
246 #define SDEBUG_MAX_PARTS 4
247
248 #define SDEBUG_MAX_CMD_LEN 32
249
250
251 struct sdebug_dev_info {
252         struct list_head dev_list;
253         unsigned int channel;
254         unsigned int target;
255         u64 lun;
256         uuid_t lu_name;
257         struct sdebug_host_info *sdbg_host;
258         unsigned long uas_bm[1];
259         atomic_t num_in_q;
260         atomic_t stopped;
261         bool used;
262 };
263
264 struct sdebug_host_info {
265         struct list_head host_list;
266         struct Scsi_Host *shost;
267         struct device dev;
268         struct list_head dev_info_list;
269 };
270
271 #define to_sdebug_host(d)       \
272         container_of(d, struct sdebug_host_info, dev)
273
274 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
275                       SDEB_DEFER_WQ = 2};
276
277 struct sdebug_defer {
278         struct hrtimer hrt;
279         struct execute_work ew;
280         int sqa_idx;    /* index of sdebug_queue array */
281         int qc_idx;     /* index of sdebug_queued_cmd array within sqa_idx */
282         int issuing_cpu;
283         bool init_hrt;
284         bool init_wq;
285         bool aborted;   /* true when blk_abort_request() already called */
286         enum sdeb_defer_type defer_t;
287 };
288
289 struct sdebug_queued_cmd {
290         /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
291          * instance indicates this slot is in use.
292          */
293         struct sdebug_defer *sd_dp;
294         struct scsi_cmnd *a_cmnd;
295         unsigned int inj_recovered:1;
296         unsigned int inj_transport:1;
297         unsigned int inj_dif:1;
298         unsigned int inj_dix:1;
299         unsigned int inj_short:1;
300         unsigned int inj_host_busy:1;
301         unsigned int inj_cmd_abort:1;
302 };
303
304 struct sdebug_queue {
305         struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
306         unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
307         spinlock_t qc_lock;
308         atomic_t blocked;       /* to temporarily stop more being queued */
309 };
310
311 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
312 static atomic_t sdebug_completions;  /* count of deferred completions */
313 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
314 static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
315
316 struct opcode_info_t {
317         u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff */
318                                 /* for terminating element */
319         u8 opcode;              /* if num_attached > 0, preferred */
320         u16 sa;                 /* service action */
321         u32 flags;              /* OR-ed set of SDEB_F_* */
322         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
323         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
324         u8 len_mask[16];        /* len_mask[0]-->cdb_len, then mask for cdb */
325                                 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
326 };
327
328 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
329 enum sdeb_opcode_index {
330         SDEB_I_INVALID_OPCODE = 0,
331         SDEB_I_INQUIRY = 1,
332         SDEB_I_REPORT_LUNS = 2,
333         SDEB_I_REQUEST_SENSE = 3,
334         SDEB_I_TEST_UNIT_READY = 4,
335         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
336         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
337         SDEB_I_LOG_SENSE = 7,
338         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
339         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
340         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
341         SDEB_I_START_STOP = 11,
342         SDEB_I_SERV_ACT_IN_16 = 12,     /* add ...SERV_ACT_IN_12 if needed */
343         SDEB_I_SERV_ACT_OUT_16 = 13,    /* add ...SERV_ACT_OUT_12 if needed */
344         SDEB_I_MAINT_IN = 14,
345         SDEB_I_MAINT_OUT = 15,
346         SDEB_I_VERIFY = 16,             /* 10 only */
347         SDEB_I_VARIABLE_LEN = 17,       /* READ(32), WRITE(32), WR_SCAT(32) */
348         SDEB_I_RESERVE = 18,            /* 6, 10 */
349         SDEB_I_RELEASE = 19,            /* 6, 10 */
350         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
351         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
352         SDEB_I_ATA_PT = 22,             /* 12, 16 */
353         SDEB_I_SEND_DIAG = 23,
354         SDEB_I_UNMAP = 24,
355         SDEB_I_WRITE_BUFFER = 25,
356         SDEB_I_WRITE_SAME = 26,         /* 10, 16 */
357         SDEB_I_SYNC_CACHE = 27,         /* 10, 16 */
358         SDEB_I_COMP_WRITE = 28,
359         SDEB_I_LAST_ELEMENT = 29,       /* keep this last (previous + 1) */
360 };
361
362
363 static const unsigned char opcode_ind_arr[256] = {
364 /* 0x0; 0x0->0x1f: 6 byte cdbs */
365         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
366             0, 0, 0, 0,
367         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
368         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
369             SDEB_I_RELEASE,
370         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
371             SDEB_I_ALLOW_REMOVAL, 0,
372 /* 0x20; 0x20->0x3f: 10 byte cdbs */
373         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
374         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
375         0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
376         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
377 /* 0x40; 0x40->0x5f: 10 byte cdbs */
378         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
379         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
380         0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
381             SDEB_I_RELEASE,
382         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
383 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
384         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
385         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
386         0, SDEB_I_VARIABLE_LEN,
387 /* 0x80; 0x80->0x9f: 16 byte cdbs */
388         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
389         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
390         0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
391         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
392 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
393         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
394              SDEB_I_MAINT_OUT, 0, 0, 0,
395         SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
396              0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
397         0, 0, 0, 0, 0, 0, 0, 0,
398         0, 0, 0, 0, 0, 0, 0, 0,
399 /* 0xc0; 0xc0->0xff: vendor specific */
400         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
401         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
402         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
403         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
404 };
405
406 /*
407  * The following "response" functions return the SCSI mid-level's 4 byte
408  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
409  * command completion, they can mask their return value with
410  * SDEG_RES_IMMED_MASK .
411  */
412 #define SDEG_RES_IMMED_MASK 0x40000000
413
414 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
415 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
416 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
417 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
418 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
419 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
420 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
421 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
422 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
423 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
424 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
425 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
426 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
427 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
428 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
429 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
430 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
431 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
432 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
433 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
434 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
435 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
436
437 /*
438  * The following are overflow arrays for cdbs that "hit" the same index in
439  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
440  * should be placed in opcode_info_arr[], the others should be placed here.
441  */
442 static const struct opcode_info_t msense_iarr[] = {
443         {0, 0x1a, 0, F_D_IN, NULL, NULL,
444             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
445 };
446
447 static const struct opcode_info_t mselect_iarr[] = {
448         {0, 0x15, 0, F_D_OUT, NULL, NULL,
449             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
450 };
451
452 static const struct opcode_info_t read_iarr[] = {
453         {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
454             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
455              0, 0, 0, 0} },
456         {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
457             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
458         {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
459             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
460              0xc7, 0, 0, 0, 0} },
461 };
462
463 static const struct opcode_info_t write_iarr[] = {
464         {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
465             NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
466                    0, 0, 0, 0, 0, 0} },
467         {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
468             NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
469                    0, 0, 0} },
470         {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
471             NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
472                    0xbf, 0xc7, 0, 0, 0, 0} },
473 };
474
475 static const struct opcode_info_t sa_in_16_iarr[] = {
476         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
477             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
478              0xff, 0xff, 0xff, 0, 0xc7} },      /* GET LBA STATUS(16) */
479 };
480
481 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
482         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
483             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
484                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
485         {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
486             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
487                    0, 0xff, 0xff, 0x0, 0x0} },  /* WRITE SCATTERED(32) */
488 };
489
490 static const struct opcode_info_t maint_in_iarr[] = {   /* MAINT IN */
491         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
492             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
493              0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
494         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
495             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
496              0, 0} },   /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
497 };
498
499 static const struct opcode_info_t write_same_iarr[] = {
500         {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
501             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
502              0xff, 0xff, 0xff, 0x3f, 0xc7} },           /* WRITE SAME(16) */
503 };
504
505 static const struct opcode_info_t reserve_iarr[] = {
506         {0, 0x16, 0, F_D_OUT, NULL, NULL,               /* RESERVE(6) */
507             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
508 };
509
510 static const struct opcode_info_t release_iarr[] = {
511         {0, 0x17, 0, F_D_OUT, NULL, NULL,               /* RELEASE(6) */
512             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
513 };
514
515 static const struct opcode_info_t sync_cache_iarr[] = {
516         {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
517             {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
518              0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },     /* SYNC_CACHE (16) */
519 };
520
521
522 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
523  * plus the terminating elements for logic that scans this table such as
524  * REPORT SUPPORTED OPERATION CODES. */
525 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
526 /* 0 */
527         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,    /* unknown opcodes */
528             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
529         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
530             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
532             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
533              0, 0} },                                   /* REPORT LUNS */
534         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
535             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
536         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
537             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
538 /* 5 */
539         {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,      /* MODE SENSE(10) */
540             resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
541                 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
542         {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,    /* MODE SELECT(10) */
543             resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
544                 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
545         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,      /* LOG SENSE */
546             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
547              0, 0, 0} },
548         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
549             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
550              0, 0} },
551         {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
552             resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
553             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
554 /* 10 */
555         {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
556             resp_write_dt0, write_iarr,                 /* WRITE(16) */
557                 {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
558                  0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
559         {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
560             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
561         {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
562             resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
563                 {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
564                  0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
565         {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
566             NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
567             0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
568         {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
569             resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
570                 maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
571                                 0xff, 0, 0xc7, 0, 0, 0, 0} },
572 /* 15 */
573         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
574             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
575         {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
576             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
577              0, 0, 0, 0, 0, 0} },
578         {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
579             resp_read_dt0, vl_iarr,     /* VARIABLE LENGTH, READ(32) */
580             {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
581              0xff, 0xff} },
582         {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
583             NULL, reserve_iarr, /* RESERVE(10) <no response function> */
584             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
585              0} },
586         {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
587             NULL, release_iarr, /* RELEASE(10) <no response function> */
588             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
589              0} },
590 /* 20 */
591         {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
592             {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
593         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
594             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
595         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
596             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
597         {0, 0x1d, F_D_OUT, 0, NULL, NULL,       /* SEND DIAGNOSTIC */
598             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
599         {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
600             {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
601 /* 25 */
602         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
603             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
604              0, 0, 0, 0} },                     /* WRITE_BUFFER */
605         {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
606             resp_write_same_10, write_same_iarr,        /* WRITE SAME(10) */
607                 {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
608                  0, 0, 0, 0, 0} },
609         {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
610             resp_sync_cache, sync_cache_iarr,
611             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
612              0, 0, 0, 0} },                     /* SYNC_CACHE (10) */
613         {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
614             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
615              0, 0xff, 0x3f, 0xc7} },            /* COMPARE AND WRITE */
616
617 /* 29 */
618         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
619             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
620 };
621
622 static int sdebug_add_host = DEF_NUM_HOST;
623 static int sdebug_ato = DEF_ATO;
624 static int sdebug_cdb_len = DEF_CDB_LEN;
625 static int sdebug_jdelay = DEF_JDELAY;  /* if > 0 then unit is jiffies */
626 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
627 static int sdebug_dif = DEF_DIF;
628 static int sdebug_dix = DEF_DIX;
629 static int sdebug_dsense = DEF_D_SENSE;
630 static int sdebug_every_nth = DEF_EVERY_NTH;
631 static int sdebug_fake_rw = DEF_FAKE_RW;
632 static unsigned int sdebug_guard = DEF_GUARD;
633 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
634 static int sdebug_max_luns = DEF_MAX_LUNS;
635 static int sdebug_max_queue = SDEBUG_CANQUEUE;  /* per submit queue */
636 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
637 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
638 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
639 static int sdebug_ndelay = DEF_NDELAY;  /* if > 0 then unit is nanoseconds */
640 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
641 static int sdebug_no_uld;
642 static int sdebug_num_parts = DEF_NUM_PARTS;
643 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
644 static int sdebug_opt_blks = DEF_OPT_BLKS;
645 static int sdebug_opts = DEF_OPTS;
646 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
647 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
648 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
649 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
650 static int sdebug_sector_size = DEF_SECTOR_SIZE;
651 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
652 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
653 static unsigned int sdebug_lbpu = DEF_LBPU;
654 static unsigned int sdebug_lbpws = DEF_LBPWS;
655 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
656 static unsigned int sdebug_lbprz = DEF_LBPRZ;
657 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
658 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
659 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
660 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
661 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
662 static int sdebug_uuid_ctl = DEF_UUID_CTL;
663 static bool sdebug_removable = DEF_REMOVABLE;
664 static bool sdebug_clustering;
665 static bool sdebug_host_lock = DEF_HOST_LOCK;
666 static bool sdebug_strict = DEF_STRICT;
667 static bool sdebug_any_injecting_opt;
668 static bool sdebug_verbose;
669 static bool have_dif_prot;
670 static bool write_since_sync;
671 static bool sdebug_statistics = DEF_STATISTICS;
672 static bool sdebug_wp;
673
674 static unsigned int sdebug_store_sectors;
675 static sector_t sdebug_capacity;        /* in sectors */
676
677 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
678    may still need them */
679 static int sdebug_heads;                /* heads per disk */
680 static int sdebug_cylinders_per;        /* cylinders per surface */
681 static int sdebug_sectors_per;          /* sectors per cylinder */
682
683 static LIST_HEAD(sdebug_host_list);
684 static DEFINE_SPINLOCK(sdebug_host_list_lock);
685
686 static unsigned char *fake_storep;      /* ramdisk storage */
687 static struct t10_pi_tuple *dif_storep; /* protection info */
688 static void *map_storep;                /* provisioning map */
689
690 static unsigned long map_size;
691 static int num_aborts;
692 static int num_dev_resets;
693 static int num_target_resets;
694 static int num_bus_resets;
695 static int num_host_resets;
696 static int dix_writes;
697 static int dix_reads;
698 static int dif_errors;
699
700 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
701 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
702
703 static DEFINE_RWLOCK(atomic_rw);
704
705 static char sdebug_proc_name[] = MY_NAME;
706 static const char *my_name = MY_NAME;
707
708 static struct bus_type pseudo_lld_bus;
709
710 static struct device_driver sdebug_driverfs_driver = {
711         .name           = sdebug_proc_name,
712         .bus            = &pseudo_lld_bus,
713 };
714
715 static const int check_condition_result =
716                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
717
718 static const int illegal_condition_result =
719         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
720
721 static const int device_qfull_result =
722         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
723
724
725 /* Only do the extra work involved in logical block provisioning if one or
726  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
727  * real reads and writes (i.e. not skipping them for speed).
728  */
729 static inline bool scsi_debug_lbp(void)
730 {
731         return 0 == sdebug_fake_rw &&
732                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
733 }
734
735 static void *lba2fake_store(unsigned long long lba)
736 {
737         lba = do_div(lba, sdebug_store_sectors);
738
739         return fake_storep + lba * sdebug_sector_size;
740 }
741
742 static struct t10_pi_tuple *dif_store(sector_t sector)
743 {
744         sector = sector_div(sector, sdebug_store_sectors);
745
746         return dif_storep + sector;
747 }
748
749 static void sdebug_max_tgts_luns(void)
750 {
751         struct sdebug_host_info *sdbg_host;
752         struct Scsi_Host *hpnt;
753
754         spin_lock(&sdebug_host_list_lock);
755         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
756                 hpnt = sdbg_host->shost;
757                 if ((hpnt->this_id >= 0) &&
758                     (sdebug_num_tgts > hpnt->this_id))
759                         hpnt->max_id = sdebug_num_tgts + 1;
760                 else
761                         hpnt->max_id = sdebug_num_tgts;
762                 /* sdebug_max_luns; */
763                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
764         }
765         spin_unlock(&sdebug_host_list_lock);
766 }
767
768 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
769
770 /* Set in_bit to -1 to indicate no bit position of invalid field */
771 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
772                                  enum sdeb_cmd_data c_d,
773                                  int in_byte, int in_bit)
774 {
775         unsigned char *sbuff;
776         u8 sks[4];
777         int sl, asc;
778
779         sbuff = scp->sense_buffer;
780         if (!sbuff) {
781                 sdev_printk(KERN_ERR, scp->device,
782                             "%s: sense_buffer is NULL\n", __func__);
783                 return;
784         }
785         asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
786         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
787         scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
788         memset(sks, 0, sizeof(sks));
789         sks[0] = 0x80;
790         if (c_d)
791                 sks[0] |= 0x40;
792         if (in_bit >= 0) {
793                 sks[0] |= 0x8;
794                 sks[0] |= 0x7 & in_bit;
795         }
796         put_unaligned_be16(in_byte, sks + 1);
797         if (sdebug_dsense) {
798                 sl = sbuff[7] + 8;
799                 sbuff[7] = sl;
800                 sbuff[sl] = 0x2;
801                 sbuff[sl + 1] = 0x6;
802                 memcpy(sbuff + sl + 4, sks, 3);
803         } else
804                 memcpy(sbuff + 15, sks, 3);
805         if (sdebug_verbose)
806                 sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
807                             "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
808                             my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
809 }
810
811 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
812 {
813         unsigned char *sbuff;
814
815         sbuff = scp->sense_buffer;
816         if (!sbuff) {
817                 sdev_printk(KERN_ERR, scp->device,
818                             "%s: sense_buffer is NULL\n", __func__);
819                 return;
820         }
821         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
822
823         scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
824
825         if (sdebug_verbose)
826                 sdev_printk(KERN_INFO, scp->device,
827                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
828                             my_name, key, asc, asq);
829 }
830
831 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
832 {
833         mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
834 }
835
836 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
837                             void __user *arg)
838 {
839         if (sdebug_verbose) {
840                 if (0x1261 == cmd)
841                         sdev_printk(KERN_INFO, dev,
842                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
843                 else if (0x5331 == cmd)
844                         sdev_printk(KERN_INFO, dev,
845                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
846                                     __func__);
847                 else
848                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
849                                     __func__, cmd);
850         }
851         return -EINVAL;
852         /* return -ENOTTY; // correct return but upsets fdisk */
853 }
854
855 static void config_cdb_len(struct scsi_device *sdev)
856 {
857         switch (sdebug_cdb_len) {
858         case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
859                 sdev->use_10_for_rw = false;
860                 sdev->use_16_for_rw = false;
861                 sdev->use_10_for_ms = false;
862                 break;
863         case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
864                 sdev->use_10_for_rw = true;
865                 sdev->use_16_for_rw = false;
866                 sdev->use_10_for_ms = false;
867                 break;
868         case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
869                 sdev->use_10_for_rw = true;
870                 sdev->use_16_for_rw = false;
871                 sdev->use_10_for_ms = true;
872                 break;
873         case 16:
874                 sdev->use_10_for_rw = false;
875                 sdev->use_16_for_rw = true;
876                 sdev->use_10_for_ms = true;
877                 break;
878         case 32: /* No knobs to suggest this so same as 16 for now */
879                 sdev->use_10_for_rw = false;
880                 sdev->use_16_for_rw = true;
881                 sdev->use_10_for_ms = true;
882                 break;
883         default:
884                 pr_warn("unexpected cdb_len=%d, force to 10\n",
885                         sdebug_cdb_len);
886                 sdev->use_10_for_rw = true;
887                 sdev->use_16_for_rw = false;
888                 sdev->use_10_for_ms = false;
889                 sdebug_cdb_len = 10;
890                 break;
891         }
892 }
893
894 static void all_config_cdb_len(void)
895 {
896         struct sdebug_host_info *sdbg_host;
897         struct Scsi_Host *shost;
898         struct scsi_device *sdev;
899
900         spin_lock(&sdebug_host_list_lock);
901         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
902                 shost = sdbg_host->shost;
903                 shost_for_each_device(sdev, shost) {
904                         config_cdb_len(sdev);
905                 }
906         }
907         spin_unlock(&sdebug_host_list_lock);
908 }
909
910 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
911 {
912         struct sdebug_host_info *sdhp;
913         struct sdebug_dev_info *dp;
914
915         spin_lock(&sdebug_host_list_lock);
916         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
917                 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
918                         if ((devip->sdbg_host == dp->sdbg_host) &&
919                             (devip->target == dp->target))
920                                 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
921                 }
922         }
923         spin_unlock(&sdebug_host_list_lock);
924 }
925
926 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
927 {
928         int k;
929
930         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
931         if (k != SDEBUG_NUM_UAS) {
932                 const char *cp = NULL;
933
934                 switch (k) {
935                 case SDEBUG_UA_POR:
936                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
937                                         POWER_ON_RESET_ASCQ);
938                         if (sdebug_verbose)
939                                 cp = "power on reset";
940                         break;
941                 case SDEBUG_UA_BUS_RESET:
942                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
943                                         BUS_RESET_ASCQ);
944                         if (sdebug_verbose)
945                                 cp = "bus reset";
946                         break;
947                 case SDEBUG_UA_MODE_CHANGED:
948                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
949                                         MODE_CHANGED_ASCQ);
950                         if (sdebug_verbose)
951                                 cp = "mode parameters changed";
952                         break;
953                 case SDEBUG_UA_CAPACITY_CHANGED:
954                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
955                                         CAPACITY_CHANGED_ASCQ);
956                         if (sdebug_verbose)
957                                 cp = "capacity data changed";
958                         break;
959                 case SDEBUG_UA_MICROCODE_CHANGED:
960                         mk_sense_buffer(scp, UNIT_ATTENTION,
961                                         TARGET_CHANGED_ASC,
962                                         MICROCODE_CHANGED_ASCQ);
963                         if (sdebug_verbose)
964                                 cp = "microcode has been changed";
965                         break;
966                 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
967                         mk_sense_buffer(scp, UNIT_ATTENTION,
968                                         TARGET_CHANGED_ASC,
969                                         MICROCODE_CHANGED_WO_RESET_ASCQ);
970                         if (sdebug_verbose)
971                                 cp = "microcode has been changed without reset";
972                         break;
973                 case SDEBUG_UA_LUNS_CHANGED:
974                         /*
975                          * SPC-3 behavior is to report a UNIT ATTENTION with
976                          * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
977                          * on the target, until a REPORT LUNS command is
978                          * received.  SPC-4 behavior is to report it only once.
979                          * NOTE:  sdebug_scsi_level does not use the same
980                          * values as struct scsi_device->scsi_level.
981                          */
982                         if (sdebug_scsi_level >= 6)     /* SPC-4 and above */
983                                 clear_luns_changed_on_target(devip);
984                         mk_sense_buffer(scp, UNIT_ATTENTION,
985                                         TARGET_CHANGED_ASC,
986                                         LUNS_CHANGED_ASCQ);
987                         if (sdebug_verbose)
988                                 cp = "reported luns data has changed";
989                         break;
990                 default:
991                         pr_warn("unexpected unit attention code=%d\n", k);
992                         if (sdebug_verbose)
993                                 cp = "unknown";
994                         break;
995                 }
996                 clear_bit(k, devip->uas_bm);
997                 if (sdebug_verbose)
998                         sdev_printk(KERN_INFO, scp->device,
999                                    "%s reports: Unit attention: %s\n",
1000                                    my_name, cp);
1001                 return check_condition_result;
1002         }
1003         return 0;
1004 }
1005
1006 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1007 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1008                                 int arr_len)
1009 {
1010         int act_len;
1011         struct scsi_data_buffer *sdb = &scp->sdb;
1012
1013         if (!sdb->length)
1014                 return 0;
1015         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1016                 return DID_ERROR << 16;
1017
1018         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1019                                       arr, arr_len);
1020         scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1021
1022         return 0;
1023 }
1024
1025 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1026  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1027  * calls, not required to write in ascending offset order. Assumes resid
1028  * set to scsi_bufflen() prior to any calls.
1029  */
1030 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1031                                   int arr_len, unsigned int off_dst)
1032 {
1033         int act_len, n;
1034         struct scsi_data_buffer *sdb = &scp->sdb;
1035         off_t skip = off_dst;
1036
1037         if (sdb->length <= off_dst)
1038                 return 0;
1039         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1040                 return DID_ERROR << 16;
1041
1042         act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1043                                        arr, arr_len, skip);
1044         pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1045                  __func__, off_dst, scsi_bufflen(scp), act_len,
1046                  scsi_get_resid(scp));
1047         n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
1048         scsi_set_resid(scp, min(scsi_get_resid(scp), n));
1049         return 0;
1050 }
1051
1052 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1053  * 'arr' or -1 if error.
1054  */
1055 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1056                                int arr_len)
1057 {
1058         if (!scsi_bufflen(scp))
1059                 return 0;
1060         if (scp->sc_data_direction != DMA_TO_DEVICE)
1061                 return -1;
1062
1063         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1064 }
1065
1066
1067 static char sdebug_inq_vendor_id[9] = "Linux   ";
1068 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1069 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1070 /* Use some locally assigned NAAs for SAS addresses. */
1071 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1072 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1073 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1074
1075 /* Device identification VPD page. Returns number of bytes placed in arr */
1076 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1077                           int target_dev_id, int dev_id_num,
1078                           const char *dev_id_str, int dev_id_str_len,
1079                           const uuid_t *lu_name)
1080 {
1081         int num, port_a;
1082         char b[32];
1083
1084         port_a = target_dev_id + 1;
1085         /* T10 vendor identifier field format (faked) */
1086         arr[0] = 0x2;   /* ASCII */
1087         arr[1] = 0x1;
1088         arr[2] = 0x0;
1089         memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1090         memcpy(&arr[12], sdebug_inq_product_id, 16);
1091         memcpy(&arr[28], dev_id_str, dev_id_str_len);
1092         num = 8 + 16 + dev_id_str_len;
1093         arr[3] = num;
1094         num += 4;
1095         if (dev_id_num >= 0) {
1096                 if (sdebug_uuid_ctl) {
1097                         /* Locally assigned UUID */
1098                         arr[num++] = 0x1;  /* binary (not necessarily sas) */
1099                         arr[num++] = 0xa;  /* PIV=0, lu, naa */
1100                         arr[num++] = 0x0;
1101                         arr[num++] = 0x12;
1102                         arr[num++] = 0x10; /* uuid type=1, locally assigned */
1103                         arr[num++] = 0x0;
1104                         memcpy(arr + num, lu_name, 16);
1105                         num += 16;
1106                 } else {
1107                         /* NAA-3, Logical unit identifier (binary) */
1108                         arr[num++] = 0x1;  /* binary (not necessarily sas) */
1109                         arr[num++] = 0x3;  /* PIV=0, lu, naa */
1110                         arr[num++] = 0x0;
1111                         arr[num++] = 0x8;
1112                         put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1113                         num += 8;
1114                 }
1115                 /* Target relative port number */
1116                 arr[num++] = 0x61;      /* proto=sas, binary */
1117                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
1118                 arr[num++] = 0x0;       /* reserved */
1119                 arr[num++] = 0x4;       /* length */
1120                 arr[num++] = 0x0;       /* reserved */
1121                 arr[num++] = 0x0;       /* reserved */
1122                 arr[num++] = 0x0;
1123                 arr[num++] = 0x1;       /* relative port A */
1124         }
1125         /* NAA-3, Target port identifier */
1126         arr[num++] = 0x61;      /* proto=sas, binary */
1127         arr[num++] = 0x93;      /* piv=1, target port, naa */
1128         arr[num++] = 0x0;
1129         arr[num++] = 0x8;
1130         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1131         num += 8;
1132         /* NAA-3, Target port group identifier */
1133         arr[num++] = 0x61;      /* proto=sas, binary */
1134         arr[num++] = 0x95;      /* piv=1, target port group id */
1135         arr[num++] = 0x0;
1136         arr[num++] = 0x4;
1137         arr[num++] = 0;
1138         arr[num++] = 0;
1139         put_unaligned_be16(port_group_id, arr + num);
1140         num += 2;
1141         /* NAA-3, Target device identifier */
1142         arr[num++] = 0x61;      /* proto=sas, binary */
1143         arr[num++] = 0xa3;      /* piv=1, target device, naa */
1144         arr[num++] = 0x0;
1145         arr[num++] = 0x8;
1146         put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1147         num += 8;
1148         /* SCSI name string: Target device identifier */
1149         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
1150         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
1151         arr[num++] = 0x0;
1152         arr[num++] = 24;
1153         memcpy(arr + num, "naa.32222220", 12);
1154         num += 12;
1155         snprintf(b, sizeof(b), "%08X", target_dev_id);
1156         memcpy(arr + num, b, 8);
1157         num += 8;
1158         memset(arr + num, 0, 4);
1159         num += 4;
1160         return num;
1161 }
1162
1163 static unsigned char vpd84_data[] = {
1164 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1165     0x22,0x22,0x22,0x0,0xbb,0x1,
1166     0x22,0x22,0x22,0x0,0xbb,0x2,
1167 };
1168
1169 /*  Software interface identification VPD page */
1170 static int inquiry_vpd_84(unsigned char *arr)
1171 {
1172         memcpy(arr, vpd84_data, sizeof(vpd84_data));
1173         return sizeof(vpd84_data);
1174 }
1175
1176 /* Management network addresses VPD page */
1177 static int inquiry_vpd_85(unsigned char *arr)
1178 {
1179         int num = 0;
1180         const char *na1 = "https://www.kernel.org/config";
1181         const char *na2 = "http://www.kernel.org/log";
1182         int plen, olen;
1183
1184         arr[num++] = 0x1;       /* lu, storage config */
1185         arr[num++] = 0x0;       /* reserved */
1186         arr[num++] = 0x0;
1187         olen = strlen(na1);
1188         plen = olen + 1;
1189         if (plen % 4)
1190                 plen = ((plen / 4) + 1) * 4;
1191         arr[num++] = plen;      /* length, null termianted, padded */
1192         memcpy(arr + num, na1, olen);
1193         memset(arr + num + olen, 0, plen - olen);
1194         num += plen;
1195
1196         arr[num++] = 0x4;       /* lu, logging */
1197         arr[num++] = 0x0;       /* reserved */
1198         arr[num++] = 0x0;
1199         olen = strlen(na2);
1200         plen = olen + 1;
1201         if (plen % 4)
1202                 plen = ((plen / 4) + 1) * 4;
1203         arr[num++] = plen;      /* length, null terminated, padded */
1204         memcpy(arr + num, na2, olen);
1205         memset(arr + num + olen, 0, plen - olen);
1206         num += plen;
1207
1208         return num;
1209 }
1210
1211 /* SCSI ports VPD page */
1212 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1213 {
1214         int num = 0;
1215         int port_a, port_b;
1216
1217         port_a = target_dev_id + 1;
1218         port_b = port_a + 1;
1219         arr[num++] = 0x0;       /* reserved */
1220         arr[num++] = 0x0;       /* reserved */
1221         arr[num++] = 0x0;
1222         arr[num++] = 0x1;       /* relative port 1 (primary) */
1223         memset(arr + num, 0, 6);
1224         num += 6;
1225         arr[num++] = 0x0;
1226         arr[num++] = 12;        /* length tp descriptor */
1227         /* naa-5 target port identifier (A) */
1228         arr[num++] = 0x61;      /* proto=sas, binary */
1229         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1230         arr[num++] = 0x0;       /* reserved */
1231         arr[num++] = 0x8;       /* length */
1232         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1233         num += 8;
1234         arr[num++] = 0x0;       /* reserved */
1235         arr[num++] = 0x0;       /* reserved */
1236         arr[num++] = 0x0;
1237         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1238         memset(arr + num, 0, 6);
1239         num += 6;
1240         arr[num++] = 0x0;
1241         arr[num++] = 12;        /* length tp descriptor */
1242         /* naa-5 target port identifier (B) */
1243         arr[num++] = 0x61;      /* proto=sas, binary */
1244         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1245         arr[num++] = 0x0;       /* reserved */
1246         arr[num++] = 0x8;       /* length */
1247         put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1248         num += 8;
1249
1250         return num;
1251 }
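/*
 * The SCSI Ports page advertises two relative target ports (1 and 2).
 * Each descriptor carries an 8 byte NAA target port identifier built
 * from naa3_comp_a plus a per-port offset derived from target_dev_id;
 * port B is the "fake" second port that REPORT TARGET PORT GROUPS later
 * reports as unavailable.
 */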
1252
1253
1254 static unsigned char vpd89_data[] = {
1255 /* from 4th byte */ 0,0,0,0,
1256 'l','i','n','u','x',' ',' ',' ',
1257 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1258 '1','2','3','4',
1259 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1260 0xec,0,0,0,
1261 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1262 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1263 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1264 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1265 0x53,0x41,
1266 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1267 0x20,0x20,
1268 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1269 0x10,0x80,
1270 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1271 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1272 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1273 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1274 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1275 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1276 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1277 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1278 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1279 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1280 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1281 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1282 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1283 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1284 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1285 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1286 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1287 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1288 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1289 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1290 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1291 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1292 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1293 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1294 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1295 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1296 };
1297
1298 /* ATA Information VPD page */
1299 static int inquiry_vpd_89(unsigned char *arr)
1300 {
1301         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1302         return sizeof(vpd89_data);
1303 }
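/*
 * vpd89_data is a canned ATA Information payload (SAT vendor/product
 * strings followed by ATA IDENTIFY DEVICE style data), so this pseudo
 * device presents itself as a SATA disk behind a SCSI/ATA translation
 * layer; none of it changes at run time.
 */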
1304
1305
1306 static unsigned char vpdb0_data[] = {
1307         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1308         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1309         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1310         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1311 };
1312
1313 /* Block limits VPD page (SBC-3) */
1314 static int inquiry_vpd_b0(unsigned char *arr)
1315 {
1316         unsigned int gran;
1317
1318         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1319
1320         /* Optimal transfer length granularity */
1321         if (sdebug_opt_xferlen_exp != 0 &&
1322             sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1323                 gran = 1 << sdebug_opt_xferlen_exp;
1324         else
1325                 gran = 1 << sdebug_physblk_exp;
1326         put_unaligned_be16(gran, arr + 2);
1327
1328         /* Maximum Transfer Length */
1329         if (sdebug_store_sectors > 0x400)
1330                 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1331
1332         /* Optimal Transfer Length */
1333         put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1334
1335         if (sdebug_lbpu) {
1336                 /* Maximum Unmap LBA Count */
1337                 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1338
1339                 /* Maximum Unmap Block Descriptor Count */
1340                 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1341         }
1342
1343         /* Unmap Granularity Alignment */
1344         if (sdebug_unmap_alignment) {
1345                 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1346                 arr[28] |= 0x80; /* UGAVALID */
1347         }
1348
1349         /* Optimal Unmap Granularity */
1350         put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1351
1352         /* Maximum WRITE SAME Length */
1353         put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1354
1355         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1358 }
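/*
 * Offsets above are into the page payload that follows the 4 byte VPD
 * header: optimal transfer granularity at 2, maximum transfer length at
 * 4, optimal transfer length at 8, maximum UNMAP LBA count at 16,
 * maximum UNMAP descriptor count at 20, optimal unmap granularity at 24,
 * unmap granularity alignment (with the UGAVALID flag) at 28 and maximum
 * WRITE SAME length at 32.  0x3c is the full SBC-3 page length, which
 * covers the provisioning fields even when provisioning is disabled.
 */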
1359
1360 /* Block device characteristics VPD page (SBC-3) */
1361 static int inquiry_vpd_b1(unsigned char *arr)
1362 {
1363         memset(arr, 0, 0x3c);
1364         arr[0] = 0;
1365         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1366         arr[2] = 0;
1367         arr[3] = 5;     /* less than 1.8" */
1368
1369         return 0x3c;
1370 }
1371
1372 /* Logical block provisioning VPD page (SBC-4) */
1373 static int inquiry_vpd_b2(unsigned char *arr)
1374 {
1375         memset(arr, 0, 0x4);
1376         arr[0] = 0;                     /* threshold exponent */
1377         if (sdebug_lbpu)
1378                 arr[1] = 1 << 7;
1379         if (sdebug_lbpws)
1380                 arr[1] |= 1 << 6;
1381         if (sdebug_lbpws10)
1382                 arr[1] |= 1 << 5;
1383         if (sdebug_lbprz && scsi_debug_lbp())
1384                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1385         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1386         /* minimum_percentage=0; provisioning_type=0 (unknown) */
1387         /* threshold_percentage=0 */
1388         return 0x4;
1389 }
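/*
 * In the payload built above, arr[1] encodes LBPU in bit 7, LBPWS in
 * bit 6, LBPWS10 in bit 5 and the 3 bit LBPRZ field in bits 4:2,
 * mirroring the sdebug_lbpu/sdebug_lbpws/sdebug_lbpws10/sdebug_lbprz
 * flags used here.
 */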
1390
1391 #define SDEBUG_LONG_INQ_SZ 96
1392 #define SDEBUG_MAX_INQ_ARR_SZ 584
1393
1394 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1395 {
1396         unsigned char pq_pdt;
1397         unsigned char *arr;
1398         unsigned char *cmd = scp->cmnd;
1399         int alloc_len, n, ret;
1400         bool have_wlun, is_disk;
1401
1402         alloc_len = get_unaligned_be16(cmd + 3);
1403         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1404         if (!arr)
1405                 return DID_REQUEUE << 16;
1406         is_disk = (sdebug_ptype == TYPE_DISK);
1407         have_wlun = scsi_is_wlun(scp->device->lun);
1408         if (have_wlun)
1409                 pq_pdt = TYPE_WLUN;     /* present, wlun */
1410         else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1411                 pq_pdt = 0x7f;  /* not present, PQ=3, PDT=0x1f */
1412         else
1413                 pq_pdt = (sdebug_ptype & 0x1f);
1414         arr[0] = pq_pdt;
1415         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1416                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1417                 kfree(arr);
1418                 return check_condition_result;
1419         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1420                 int lu_id_num, port_group_id, target_dev_id, len;
1421                 char lu_id_str[6];
1422                 int host_no = devip->sdbg_host->shost->host_no;
1423
1424                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1425                     (devip->channel & 0x7f);
1426                 if (sdebug_vpd_use_hostno == 0)
1427                         host_no = 0;
1428                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1429                             (devip->target * 1000) + devip->lun);
1430                 target_dev_id = ((host_no + 1) * 2000) +
1431                                  (devip->target * 1000) - 3;
1432                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1433                 if (0 == cmd[2]) { /* supported vital product data pages */
1434                         arr[1] = cmd[2];        /*sanity */
1435                         n = 4;
1436                         arr[n++] = 0x0;   /* this page */
1437                         arr[n++] = 0x80;  /* unit serial number */
1438                         arr[n++] = 0x83;  /* device identification */
1439                         arr[n++] = 0x84;  /* software interface ident. */
1440                         arr[n++] = 0x85;  /* management network addresses */
1441                         arr[n++] = 0x86;  /* extended inquiry */
1442                         arr[n++] = 0x87;  /* mode page policy */
1443                         arr[n++] = 0x88;  /* SCSI ports */
1444                         if (is_disk) {    /* SBC only */
1445                                 arr[n++] = 0x89;  /* ATA information */
1446                                 arr[n++] = 0xb0;  /* Block limits */
1447                                 arr[n++] = 0xb1;  /* Block characteristics */
1448                                 arr[n++] = 0xb2;  /* Logical Block Prov */
1449                         }
1450                         arr[3] = n - 4;   /* number of supported VPD pages */
1451                 } else if (0x80 == cmd[2]) { /* unit serial number */
1452                         arr[1] = cmd[2];        /*sanity */
1453                         arr[3] = len;
1454                         memcpy(&arr[4], lu_id_str, len);
1455                 } else if (0x83 == cmd[2]) { /* device identification */
1456                         arr[1] = cmd[2];        /*sanity */
1457                         arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1458                                                 target_dev_id, lu_id_num,
1459                                                 lu_id_str, len,
1460                                                 &devip->lu_name);
1461                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1462                         arr[1] = cmd[2];        /*sanity */
1463                         arr[3] = inquiry_vpd_84(&arr[4]);
1464                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1465                         arr[1] = cmd[2];        /*sanity */
1466                         arr[3] = inquiry_vpd_85(&arr[4]);
1467                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1468                         arr[1] = cmd[2];        /*sanity */
1469                         arr[3] = 0x3c;  /* number of following entries */
1470                         if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1471                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1472                         else if (have_dif_prot)
1473                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1474                         else
1475                                 arr[4] = 0x0;   /* no protection stuff */
1476                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1477                 } else if (0x87 == cmd[2]) { /* mode page policy */
1478                         arr[1] = cmd[2];        /*sanity */
1479                         arr[3] = 0x8;   /* number of following entries */
1480                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1481                         arr[6] = 0x80;  /* mlus, shared */
1482                         arr[8] = 0x18;   /* protocol specific lu */
1483                         arr[10] = 0x82;  /* mlus, per initiator port */
1484                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1485                         arr[1] = cmd[2];        /*sanity */
1486                         arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1487                 } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
1488                         arr[1] = cmd[2];        /*sanity */
1489                         n = inquiry_vpd_89(&arr[4]);
1490                         put_unaligned_be16(n, arr + 2);
1491                 } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
1492                         arr[1] = cmd[2];        /*sanity */
1493                         arr[3] = inquiry_vpd_b0(&arr[4]);
1494                 } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
1495                         arr[1] = cmd[2];        /*sanity */
1496                         arr[3] = inquiry_vpd_b1(&arr[4]);
1497                 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1498                         arr[1] = cmd[2];        /*sanity */
1499                         arr[3] = inquiry_vpd_b2(&arr[4]);
1500                 } else {
1501                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1502                         kfree(arr);
1503                         return check_condition_result;
1504                 }
1505                 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1506                 ret = fill_from_dev_buffer(scp, arr,
1507                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1508                 kfree(arr);
1509                 return ret;
1510         }
1511         /* drops through here for a standard inquiry */
1512         arr[1] = sdebug_removable ? 0x80 : 0;   /* Removable disk */
1513         arr[2] = sdebug_scsi_level;
1514         arr[3] = 2;    /* response_data_format==2 */
1515         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1516         arr[5] = (int)have_dif_prot;    /* PROTECT bit */
1517         if (sdebug_vpd_use_hostno == 0)
1518                 arr[5] |= 0x10; /* claim: implicit TPGS */
1519         arr[6] = 0x10; /* claim: MultiP */
1520         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1521         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1522         memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1523         memcpy(&arr[16], sdebug_inq_product_id, 16);
1524         memcpy(&arr[32], sdebug_inq_product_rev, 4);
1525         /* Use Vendor Specific area to place driver date in ASCII hex */
1526         memcpy(&arr[36], sdebug_version_date, 8);
1527         /* version descriptors (2 bytes each) follow */
1528         put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1529         put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1530         n = 62;
1531         if (is_disk) {          /* SBC-4 no version claimed */
1532                 put_unaligned_be16(0x600, arr + n);
1533                 n += 2;
1534         } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1535                 put_unaligned_be16(0x525, arr + n);
1536                 n += 2;
1537         }
1538         put_unaligned_be16(0x2100, arr + n);    /* SPL-4 no version claimed */
1539         ret = fill_from_dev_buffer(scp, arr,
1540                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1541         kfree(arr);
1542         return ret;
1543 }
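/*
 * A minimal user-space sketch (not part of the driver) of how the VPD
 * handling above can be exercised through the SG_IO pass-through
 * interface; the /dev/sg0 node and the 0xb0 (Block limits) page are
 * arbitrary examples and error handling is omitted:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	static void read_block_limits_vpd(void)
 *	{
 *		// INQUIRY, EVPD=1, page 0xb0, allocation length 252
 *		unsigned char cdb[6] = { 0x12, 0x01, 0xb0, 0, 252, 0 };
 *		unsigned char buf[252], sense[32];
 *		struct sg_io_hdr io;
 *		int fd = open("/dev/sg0", O_RDWR);
 *
 *		memset(&io, 0, sizeof(io));
 *		io.interface_id = 'S';
 *		io.dxfer_direction = SG_DXFER_FROM_DEV;
 *		io.cmd_len = sizeof(cdb);
 *		io.cmdp = cdb;
 *		io.dxfer_len = sizeof(buf);
 *		io.dxferp = buf;
 *		io.mx_sb_len = sizeof(sense);
 *		io.sbp = sense;
 *		io.timeout = 5000;		// milliseconds
 *		if (fd >= 0)
 *			ioctl(fd, SG_IO, &io);	// on success buf holds the page
 *	}
 */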
1544
1545 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1546                                    0, 0, 0x0, 0x0};
1547
1548 static int resp_requests(struct scsi_cmnd *scp,
1549                          struct sdebug_dev_info *devip)
1550 {
1551         unsigned char *sbuff;
1552         unsigned char *cmd = scp->cmnd;
1553         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1554         bool dsense;
1555         int len = 18;
1556
1557         memset(arr, 0, sizeof(arr));
1558         dsense = !!(cmd[1] & 1);
1559         sbuff = scp->sense_buffer;
1560         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1561                 if (dsense) {
1562                         arr[0] = 0x72;
1563                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1564                         arr[2] = THRESHOLD_EXCEEDED;
1565                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1566                         len = 8;
1567                 } else {
1568                         arr[0] = 0x70;
1569                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1570                         arr[7] = 0xa;           /* 18 byte sense buffer */
1571                         arr[12] = THRESHOLD_EXCEEDED;
1572                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1573                 }
1574         } else {
1575                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1576                 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1577                         ;       /* have sense and formats match */
1578                 else if (arr[0] <= 0x70) {
1579                         if (dsense) {
1580                                 memset(arr, 0, 8);
1581                                 arr[0] = 0x72;
1582                                 len = 8;
1583                         } else {
1584                                 memset(arr, 0, 18);
1585                                 arr[0] = 0x70;
1586                                 arr[7] = 0xa;
1587                         }
1588                 } else if (dsense) {
1589                         memset(arr, 0, 8);
1590                         arr[0] = 0x72;
1591                         arr[1] = sbuff[2];     /* sense key */
1592                         arr[2] = sbuff[12];    /* asc */
1593                         arr[3] = sbuff[13];    /* ascq */
1594                         len = 8;
1595                 } else {
1596                         memset(arr, 0, 18);
1597                         arr[0] = 0x70;
1598                         arr[2] = sbuff[1];     /* sense key */
1599                         arr[7] = 0xa;
1600                         arr[12] = sbuff[2];    /* asc */
1601                         arr[13] = sbuff[3];    /* ascq */
1602                 }
1603
1604         }
1605         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1606         return fill_from_dev_buffer(scp, arr, len);
1607 }
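/*
 * REQUEST SENSE therefore returns the stored sense data in whichever
 * format the DESC bit of the CDB asks for, converting between fixed
 * (0x70) and descriptor (0x72) layouts when they differ, except when the
 * Informational Exceptions mode page has TEST set with MRIE == 6, in
 * which case a synthetic THRESHOLD EXCEEDED report is returned instead.
 */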
1608
1609 static int resp_start_stop(struct scsi_cmnd *scp,
1610                            struct sdebug_dev_info *devip)
1611 {
1612         unsigned char *cmd = scp->cmnd;
1613         int power_cond, stop;
1614         bool changing;
1615
1616         power_cond = (cmd[4] & 0xf0) >> 4;
1617         if (power_cond) {
1618                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1619                 return check_condition_result;
1620         }
1621         stop = !(cmd[4] & 1);
1622         changing = atomic_read(&devip->stopped) == !stop;
1623         atomic_xchg(&devip->stopped, stop);
1624         if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1625                 return SDEG_RES_IMMED_MASK;
1626         else
1627                 return 0;
1628 }
1629
1630 static sector_t get_sdebug_capacity(void)
1631 {
1632         static const unsigned int gibibyte = 1073741824;
1633
1634         if (sdebug_virtual_gb > 0)
1635                 return (sector_t)sdebug_virtual_gb *
1636                         (gibibyte / sdebug_sector_size);
1637         else
1638                 return sdebug_store_sectors;
1639 }
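/*
 * Worked example: with the driver's default 512 byte logical blocks,
 * virtual_gb=4 yields 4 * (1073741824 / 512) = 8388608 sectors, even if
 * the backing store (sdebug_store_sectors) is smaller; accesses beyond
 * the store simply wrap around it (see do_device_access()).
 */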
1640
1641 #define SDEBUG_READCAP_ARR_SZ 8
1642 static int resp_readcap(struct scsi_cmnd *scp,
1643                         struct sdebug_dev_info *devip)
1644 {
1645         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1646         unsigned int capac;
1647
1648         /* following just in case virtual_gb changed */
1649         sdebug_capacity = get_sdebug_capacity();
1650         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1651         if (sdebug_capacity < 0xffffffff) {
1652                 capac = (unsigned int)sdebug_capacity - 1;
1653                 put_unaligned_be32(capac, arr + 0);
1654         } else
1655                 put_unaligned_be32(0xffffffff, arr + 0);
1656         put_unaligned_be16(sdebug_sector_size, arr + 6);
1657         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1658 }
1659
1660 #define SDEBUG_READCAP16_ARR_SZ 32
1661 static int resp_readcap16(struct scsi_cmnd *scp,
1662                           struct sdebug_dev_info *devip)
1663 {
1664         unsigned char *cmd = scp->cmnd;
1665         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1666         int alloc_len;
1667
1668         alloc_len = get_unaligned_be32(cmd + 10);
1669         /* following just in case virtual_gb changed */
1670         sdebug_capacity = get_sdebug_capacity();
1671         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1672         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1673         put_unaligned_be32(sdebug_sector_size, arr + 8);
1674         arr[13] = sdebug_physblk_exp & 0xf;
1675         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1676
1677         if (scsi_debug_lbp()) {
1678                 arr[14] |= 0x80; /* LBPME */
1679                 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1680                  * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1681                  * in the wider field maps to 0 in this field.
1682                  */
1683                 if (sdebug_lbprz & 1)   /* precisely what the draft requires */
1684                         arr[14] |= 0x40;
1685         }
1686
1687         arr[15] = sdebug_lowest_aligned & 0xff;
1688
1689         if (have_dif_prot) {
1690                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1691                 arr[12] |= 1; /* PROT_EN */
1692         }
1693
1694         return fill_from_dev_buffer(scp, arr,
1695                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1696 }
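/*
 * The 10 byte READ CAPACITY above pins the returned LBA at 0xffffffff
 * once the capacity no longer fits in 32 bits, which tells the initiator
 * to retry with READ CAPACITY(16).  The 16 byte variant additionally
 * reports the physical block exponent, lowest aligned LBA, protection
 * settings (P_TYPE/PROT_EN) and the LBPME/LBPRZ provisioning bits.
 */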
1697
1698 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1699
1700 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1701                               struct sdebug_dev_info *devip)
1702 {
1703         unsigned char *cmd = scp->cmnd;
1704         unsigned char *arr;
1705         int host_no = devip->sdbg_host->shost->host_no;
1706         int n, ret, alen, rlen;
1707         int port_group_a, port_group_b, port_a, port_b;
1708
1709         alen = get_unaligned_be32(cmd + 6);
1710         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1711         if (!arr)
1712                 return DID_REQUEUE << 16;
1713         /*
1714          * EVPD page 0x88 states we have two ports, one
1715          * real and a fake port with no device connected.
1716          * So we create two port groups with one port each
1717          * and set the group with port B to unavailable.
1718          */
1719         port_a = 0x1; /* relative port A */
1720         port_b = 0x2; /* relative port B */
1721         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1722                         (devip->channel & 0x7f);
1723         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1724                         (devip->channel & 0x7f) + 0x80;
1725
1726         /*
1727          * The asymmetric access state is cycled according to the host_id.
1728          */
1729         n = 4;
1730         if (sdebug_vpd_use_hostno == 0) {
1731                 arr[n++] = host_no % 3; /* Asymm access state */
1732                 arr[n++] = 0x0F; /* claim: all states are supported */
1733         } else {
1734                 arr[n++] = 0x0; /* Active/Optimized path */
1735                 arr[n++] = 0x01; /* only support active/optimized paths */
1736         }
1737         put_unaligned_be16(port_group_a, arr + n);
1738         n += 2;
1739         arr[n++] = 0;    /* Reserved */
1740         arr[n++] = 0;    /* Status code */
1741         arr[n++] = 0;    /* Vendor unique */
1742         arr[n++] = 0x1;  /* One port per group */
1743         arr[n++] = 0;    /* Reserved */
1744         arr[n++] = 0;    /* Reserved */
1745         put_unaligned_be16(port_a, arr + n);
1746         n += 2;
1747         arr[n++] = 3;    /* Port unavailable */
1748                 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1749         put_unaligned_be16(port_group_b, arr + n);
1750         n += 2;
1751         arr[n++] = 0;    /* Reserved */
1752         arr[n++] = 0;    /* Status code */
1753         arr[n++] = 0;    /* Vendor unique */
1754         arr[n++] = 0x1;  /* One port per group */
1755         arr[n++] = 0;    /* Reserved */
1756         arr[n++] = 0;    /* Reserved */
1757         put_unaligned_be16(port_b, arr + n);
1758         n += 2;
1759
1760         rlen = n - 4;
1761         put_unaligned_be32(rlen, arr + 0);
1762
1763         /*
1764          * Return the smallest value of either
1765          * - The allocated length
1766          * - The constructed response length
1767          * - The maximum array size
1768          */
1769         rlen = min(alen, n);
1770         ret = fill_from_dev_buffer(scp, arr,
1771                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1772         kfree(arr);
1773         return ret;
1774 }
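/*
 * The returned data is a 4 byte length header followed by one 12 byte
 * descriptor per port group (an 8 byte group header plus one 4 byte port
 * entry).  Group A cycles its asymmetric access state with the host
 * number when sdebug_vpd_use_hostno is 0; group B is always reported
 * unavailable, matching the "fake" second port advertised by VPD page
 * 0x88.
 */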
1775
1776 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1777                              struct sdebug_dev_info *devip)
1778 {
1779         bool rctd;
1780         u8 reporting_opts, req_opcode, sdeb_i, supp;
1781         u16 req_sa, u;
1782         u32 alloc_len, a_len;
1783         int k, offset, len, errsts, count, bump, na;
1784         const struct opcode_info_t *oip;
1785         const struct opcode_info_t *r_oip;
1786         u8 *arr;
1787         u8 *cmd = scp->cmnd;
1788
1789         rctd = !!(cmd[2] & 0x80);
1790         reporting_opts = cmd[2] & 0x7;
1791         req_opcode = cmd[3];
1792         req_sa = get_unaligned_be16(cmd + 4);
1793         alloc_len = get_unaligned_be32(cmd + 6);
1794         if (alloc_len < 4 || alloc_len > 0xffff) {
1795                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1796                 return check_condition_result;
1797         }
1798         if (alloc_len > 8192)
1799                 a_len = 8192;
1800         else
1801                 a_len = alloc_len;
1802         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1803         if (NULL == arr) {
1804                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1805                                 INSUFF_RES_ASCQ);
1806                 return check_condition_result;
1807         }
1808         switch (reporting_opts) {
1809         case 0: /* all commands */
1810                 /* count number of commands */
1811                 for (count = 0, oip = opcode_info_arr;
1812                      oip->num_attached != 0xff; ++oip) {
1813                         if (F_INV_OP & oip->flags)
1814                                 continue;
1815                         count += (oip->num_attached + 1);
1816                 }
1817                 bump = rctd ? 20 : 8;
1818                 put_unaligned_be32(count * bump, arr);
1819                 for (offset = 4, oip = opcode_info_arr;
1820                      oip->num_attached != 0xff && offset < a_len; ++oip) {
1821                         if (F_INV_OP & oip->flags)
1822                                 continue;
1823                         na = oip->num_attached;
1824                         arr[offset] = oip->opcode;
1825                         put_unaligned_be16(oip->sa, arr + offset + 2);
1826                         if (rctd)
1827                                 arr[offset + 5] |= 0x2;
1828                         if (FF_SA & oip->flags)
1829                                 arr[offset + 5] |= 0x1;
1830                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1831                         if (rctd)
1832                                 put_unaligned_be16(0xa, arr + offset + 8);
1833                         r_oip = oip;
1834                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1835                                 if (F_INV_OP & oip->flags)
1836                                         continue;
1837                                 offset += bump;
1838                                 arr[offset] = oip->opcode;
1839                                 put_unaligned_be16(oip->sa, arr + offset + 2);
1840                                 if (rctd)
1841                                         arr[offset + 5] |= 0x2;
1842                                 if (FF_SA & oip->flags)
1843                                         arr[offset + 5] |= 0x1;
1844                                 put_unaligned_be16(oip->len_mask[0],
1845                                                    arr + offset + 6);
1846                                 if (rctd)
1847                                         put_unaligned_be16(0xa,
1848                                                            arr + offset + 8);
1849                         }
1850                         oip = r_oip;
1851                         offset += bump;
1852                 }
1853                 break;
1854         case 1: /* one command: opcode only */
1855         case 2: /* one command: opcode plus service action */
1856         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1857                 sdeb_i = opcode_ind_arr[req_opcode];
1858                 oip = &opcode_info_arr[sdeb_i];
1859                 if (F_INV_OP & oip->flags) {
1860                         supp = 1;
1861                         offset = 4;
1862                 } else {
1863                         if (1 == reporting_opts) {
1864                                 if (FF_SA & oip->flags) {
1865                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1866                                                              2, 2);
1867                                         kfree(arr);
1868                                         return check_condition_result;
1869                                 }
1870                                 req_sa = 0;
1871                         } else if (2 == reporting_opts &&
1872                                    0 == (FF_SA & oip->flags)) {
1873                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1874                                 kfree(arr);     /* point at requested sa */
1875                                 return check_condition_result;
1876                         }
1877                         if (0 == (FF_SA & oip->flags) &&
1878                             req_opcode == oip->opcode)
1879                                 supp = 3;
1880                         else if (0 == (FF_SA & oip->flags)) {
1881                                 na = oip->num_attached;
1882                                 for (k = 0, oip = oip->arrp; k < na;
1883                                      ++k, ++oip) {
1884                                         if (req_opcode == oip->opcode)
1885                                                 break;
1886                                 }
1887                                 supp = (k >= na) ? 1 : 3;
1888                         } else if (req_sa != oip->sa) {
1889                                 na = oip->num_attached;
1890                                 for (k = 0, oip = oip->arrp; k < na;
1891                                      ++k, ++oip) {
1892                                         if (req_sa == oip->sa)
1893                                                 break;
1894                                 }
1895                                 supp = (k >= na) ? 1 : 3;
1896                         } else
1897                                 supp = 3;
1898                         if (3 == supp) {
1899                                 u = oip->len_mask[0];
1900                                 put_unaligned_be16(u, arr + 2);
1901                                 arr[4] = oip->opcode;
1902                                 for (k = 1; k < u; ++k)
1903                                         arr[4 + k] = (k < 16) ?
1904                                                  oip->len_mask[k] : 0xff;
1905                                 offset = 4 + u;
1906                         } else
1907                                 offset = 4;
1908                 }
1909                 arr[1] = (rctd ? 0x80 : 0) | supp;
1910                 if (rctd) {
1911                         put_unaligned_be16(0xa, arr + offset);
1912                         offset += 12;
1913                 }
1914                 break;
1915         default:
1916                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1917                 kfree(arr);
1918                 return check_condition_result;
1919         }
1920         offset = (offset < a_len) ? offset : a_len;
1921         len = (offset < alloc_len) ? offset : alloc_len;
1922         errsts = fill_from_dev_buffer(scp, arr, len);
1923         kfree(arr);
1924         return errsts;
1925 }
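/*
 * In the "report all" case each command descriptor is 8 bytes, or 20
 * bytes when the caller sets RCTD and a 12 byte command timeouts
 * descriptor is appended.  The single command cases return SUPPORT == 3
 * ("supported in conformance with a standard") together with the CDB
 * usage bitmap taken from len_mask, or SUPPORT == 1 ("not supported")
 * otherwise.
 */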
1926
1927 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1928                           struct sdebug_dev_info *devip)
1929 {
1930         bool repd;
1931         u32 alloc_len, len;
1932         u8 arr[16];
1933         u8 *cmd = scp->cmnd;
1934
1935         memset(arr, 0, sizeof(arr));
1936         repd = !!(cmd[2] & 0x80);
1937         alloc_len = get_unaligned_be32(cmd + 6);
1938         if (alloc_len < 4) {
1939                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1940                 return check_condition_result;
1941         }
1942         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1943         arr[1] = 0x1;           /* ITNRS */
1944         if (repd) {
1945                 arr[3] = 0xc;
1946                 len = 16;
1947         } else
1948                 len = 4;
1949
1950         len = (len < alloc_len) ? len : alloc_len;
1951         return fill_from_dev_buffer(scp, arr, len);
1952 }
1953
1954 /* <<Following mode page info copied from ST318451LW>> */
1955
1956 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
1957 {       /* Read-Write Error Recovery page for mode_sense */
1958         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1959                                         5, 0, 0xff, 0xff};
1960
1961         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1962         if (1 == pcontrol)
1963                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1964         return sizeof(err_recov_pg);
1965 }
1966
1967 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
1968 {       /* Disconnect-Reconnect page for mode_sense */
1969         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1970                                          0, 0, 0, 0, 0, 0, 0, 0};
1971
1972         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1973         if (1 == pcontrol)
1974                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1975         return sizeof(disconnect_pg);
1976 }
1977
1978 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1979 {       /* Format device page for mode_sense */
1980         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1981                                      0, 0, 0, 0, 0, 0, 0, 0,
1982                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1983
1984         memcpy(p, format_pg, sizeof(format_pg));
1985         put_unaligned_be16(sdebug_sectors_per, p + 10);
1986         put_unaligned_be16(sdebug_sector_size, p + 12);
1987         if (sdebug_removable)
1988                 p[20] |= 0x20; /* should agree with INQUIRY */
1989         if (1 == pcontrol)
1990                 memset(p + 2, 0, sizeof(format_pg) - 2);
1991         return sizeof(format_pg);
1992 }
1993
1994 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1995                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1996                                      0, 0, 0, 0};
1997
1998 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
1999 {       /* Caching page for mode_sense */
2000         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2001                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2002         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2003                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2004
2005         if (SDEBUG_OPT_N_WCE & sdebug_opts)
2006                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
2007         memcpy(p, caching_pg, sizeof(caching_pg));
2008         if (1 == pcontrol)
2009                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2010         else if (2 == pcontrol)
2011                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2012         return sizeof(caching_pg);
2013 }
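/*
 * For these mode page helpers pcontrol selects which flavour of the page
 * is returned: 0 gives current values, 1 the changeable-bits mask and 2
 * the defaults; pcontrol == 3 (saved values) is rejected in
 * resp_mode_sense() with SAVING PARAMETERS NOT SUPPORTED.
 */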
2014
2015 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2016                                     0, 0, 0x2, 0x4b};
2017
2018 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2019 {       /* Control mode page for mode_sense */
2020         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2021                                         0, 0, 0, 0};
2022         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2023                                      0, 0, 0x2, 0x4b};
2024
2025         if (sdebug_dsense)
2026                 ctrl_m_pg[2] |= 0x4;
2027         else
2028                 ctrl_m_pg[2] &= ~0x4;
2029
2030         if (sdebug_ato)
2031                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2032
2033         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2034         if (1 == pcontrol)
2035                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2036         else if (2 == pcontrol)
2037                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2038         return sizeof(ctrl_m_pg);
2039 }
2040
2041
2042 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2043 {       /* Informational Exceptions control mode page for mode_sense */
2044         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2045                                        0, 0, 0x0, 0x0};
2046         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2047                                       0, 0, 0x0, 0x0};
2048
2049         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2050         if (1 == pcontrol)
2051                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2052         else if (2 == pcontrol)
2053                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2054         return sizeof(iec_m_pg);
2055 }
2056
2057 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2058 {       /* SAS SSP mode page - short format for mode_sense */
2059         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2060                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2061
2062         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2063         if (1 == pcontrol)
2064                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2065         return sizeof(sas_sf_m_pg);
2066 }
2067
2068
2069 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2070                               int target_dev_id)
2071 {       /* SAS phy control and discover mode page for mode_sense */
2072         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2073                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2074                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2075                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2076                     0x2, 0, 0, 0, 0, 0, 0, 0,
2077                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2078                     0, 0, 0, 0, 0, 0, 0, 0,
2079                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2080                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2081                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2082                     0x3, 0, 0, 0, 0, 0, 0, 0,
2083                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2084                     0, 0, 0, 0, 0, 0, 0, 0,
2085                 };
2086         int port_a, port_b;
2087
2088         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2089         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2090         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2091         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2092         port_a = target_dev_id + 1;
2093         port_b = port_a + 1;
2094         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2095         put_unaligned_be32(port_a, p + 20);
2096         put_unaligned_be32(port_b, p + 48 + 20);
2097         if (1 == pcontrol)
2098                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2099         return sizeof(sas_pcd_m_pg);
2100 }
2101
2102 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2103 {       /* SAS SSP shared protocol specific port mode subpage */
2104         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2105                     0, 0, 0, 0, 0, 0, 0, 0,
2106                 };
2107
2108         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2109         if (1 == pcontrol)
2110                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2111         return sizeof(sas_sha_m_pg);
2112 }
2113
2114 #define SDEBUG_MAX_MSENSE_SZ 256
2115
2116 static int resp_mode_sense(struct scsi_cmnd *scp,
2117                            struct sdebug_dev_info *devip)
2118 {
2119         int pcontrol, pcode, subpcode, bd_len;
2120         unsigned char dev_spec;
2121         int alloc_len, offset, len, target_dev_id;
2122         int target = scp->device->id;
2123         unsigned char *ap;
2124         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2125         unsigned char *cmd = scp->cmnd;
2126         bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2127
2128         dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
2129         pcontrol = (cmd[2] & 0xc0) >> 6;
2130         pcode = cmd[2] & 0x3f;
2131         subpcode = cmd[3];
2132         msense_6 = (MODE_SENSE == cmd[0]);
2133         llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2134         is_disk = (sdebug_ptype == TYPE_DISK);
2135         if (is_disk && !dbd)
2136                 bd_len = llbaa ? 16 : 8;
2137         else
2138                 bd_len = 0;
2139         alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2140         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2141         if (0x3 == pcontrol) {  /* Saving values not supported */
2142                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2143                 return check_condition_result;
2144         }
2145         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2146                         (devip->target * 1000) - 3;
2147         /* for disks: set DPOFUA bit and set WP bit when write protected */
2148         if (is_disk) {
2149                 dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
2150                 if (sdebug_wp)
2151                         dev_spec |= 0x80;
2152         } else
2153                 dev_spec = 0x0;
2154         if (msense_6) {
2155                 arr[2] = dev_spec;
2156                 arr[3] = bd_len;
2157                 offset = 4;
2158         } else {
2159                 arr[3] = dev_spec;
2160                 if (16 == bd_len)
2161                         arr[4] = 0x1;   /* set LONGLBA bit */
2162                 arr[7] = bd_len;        /* assume 255 or less */
2163                 offset = 8;
2164         }
2165         ap = arr + offset;
2166         if ((bd_len > 0) && (!sdebug_capacity))
2167                 sdebug_capacity = get_sdebug_capacity();
2168
2169         if (8 == bd_len) {
2170                 if (sdebug_capacity > 0xfffffffe)
2171                         put_unaligned_be32(0xffffffff, ap + 0);
2172                 else
2173                         put_unaligned_be32(sdebug_capacity, ap + 0);
2174                 put_unaligned_be16(sdebug_sector_size, ap + 6);
2175                 offset += bd_len;
2176                 ap = arr + offset;
2177         } else if (16 == bd_len) {
2178                 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2179                 put_unaligned_be32(sdebug_sector_size, ap + 12);
2180                 offset += bd_len;
2181                 ap = arr + offset;
2182         }
2183
2184         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2185                 /* TODO: Control Extension page */
2186                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2187                 return check_condition_result;
2188         }
2189         bad_pcode = false;
2190
2191         switch (pcode) {
2192         case 0x1:       /* Read-Write error recovery page, direct access */
2193                 len = resp_err_recov_pg(ap, pcontrol, target);
2194                 offset += len;
2195                 break;
2196         case 0x2:       /* Disconnect-Reconnect page, all devices */
2197                 len = resp_disconnect_pg(ap, pcontrol, target);
2198                 offset += len;
2199                 break;
2200         case 0x3:       /* Format device page, direct access */
2201                 if (is_disk) {
2202                         len = resp_format_pg(ap, pcontrol, target);
2203                         offset += len;
2204                 } else
2205                         bad_pcode = true;
2206                 break;
2207         case 0x8:       /* Caching page, direct access */
2208                 if (is_disk) {
2209                         len = resp_caching_pg(ap, pcontrol, target);
2210                         offset += len;
2211                 } else
2212                         bad_pcode = true;
2213                 break;
2214         case 0xa:       /* Control Mode page, all devices */
2215                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2216                 offset += len;
2217                 break;
2218         case 0x19:      /* if spc==1 then sas phy, control+discover */
2219                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2220                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2221                         return check_condition_result;
2222                 }
2223                 len = 0;
2224                 if ((0x0 == subpcode) || (0xff == subpcode))
2225                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2226                 if ((0x1 == subpcode) || (0xff == subpcode))
2227                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2228                                                   target_dev_id);
2229                 if ((0x2 == subpcode) || (0xff == subpcode))
2230                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2231                 offset += len;
2232                 break;
2233         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2234                 len = resp_iec_m_pg(ap, pcontrol, target);
2235                 offset += len;
2236                 break;
2237         case 0x3f:      /* Read all Mode pages */
2238                 if ((0 == subpcode) || (0xff == subpcode)) {
2239                         len = resp_err_recov_pg(ap, pcontrol, target);
2240                         len += resp_disconnect_pg(ap + len, pcontrol, target);
2241                         if (is_disk) {
2242                                 len += resp_format_pg(ap + len, pcontrol,
2243                                                       target);
2244                                 len += resp_caching_pg(ap + len, pcontrol,
2245                                                        target);
2246                         }
2247                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2248                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2249                         if (0xff == subpcode) {
2250                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2251                                                   target, target_dev_id);
2252                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2253                         }
2254                         len += resp_iec_m_pg(ap + len, pcontrol, target);
2255                         offset += len;
2256                 } else {
2257                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2258                         return check_condition_result;
2259                 }
2260                 break;
2261         default:
2262                 bad_pcode = true;
2263                 break;
2264         }
2265         if (bad_pcode) {
2266                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2267                 return check_condition_result;
2268         }
2269         if (msense_6)
2270                 arr[0] = offset - 1;
2271         else
2272                 put_unaligned_be16((offset - 2), arr + 0);
2273         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2274 }
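/*
 * MODE SENSE(6) responses built above use a 4 byte header, so the length
 * stored in arr[0] is offset - 1; MODE SENSE(10) uses an 8 byte header
 * and a big-endian length of offset - 2.  The optional block descriptor
 * is 8 bytes, or 16 bytes when LLBAA is set on the 10 byte CDB.
 */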
2275
2276 #define SDEBUG_MAX_MSELECT_SZ 512
2277
2278 static int resp_mode_select(struct scsi_cmnd *scp,
2279                             struct sdebug_dev_info *devip)
2280 {
2281         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2282         int param_len, res, mpage;
2283         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2284         unsigned char *cmd = scp->cmnd;
2285         int mselect6 = (MODE_SELECT == cmd[0]);
2286
2287         memset(arr, 0, sizeof(arr));
2288         pf = cmd[1] & 0x10;
2289         sp = cmd[1] & 0x1;
2290         param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2291         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2292                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2293                 return check_condition_result;
2294         }
2295         res = fetch_to_dev_buffer(scp, arr, param_len);
2296         if (-1 == res)
2297                 return DID_ERROR << 16;
2298         else if (sdebug_verbose && (res < param_len))
2299                 sdev_printk(KERN_INFO, scp->device,
2300                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
2301                             __func__, param_len, res);
2302         md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2303         bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2304         if (md_len > 2) {
2305                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2306                 return check_condition_result;
2307         }
2308         off = bd_len + (mselect6 ? 4 : 8);
2309         mpage = arr[off] & 0x3f;
2310         ps = !!(arr[off] & 0x80);
2311         if (ps) {
2312                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2313                 return check_condition_result;
2314         }
2315         spf = !!(arr[off] & 0x40);
2316         pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2317                        (arr[off + 1] + 2);
2318         if ((pg_len + off) > param_len) {
2319                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2320                                 PARAMETER_LIST_LENGTH_ERR, 0);
2321                 return check_condition_result;
2322         }
2323         switch (mpage) {
2324         case 0x8:      /* Caching Mode page */
2325                 if (caching_pg[1] == arr[off + 1]) {
2326                         memcpy(caching_pg + 2, arr + off + 2,
2327                                sizeof(caching_pg) - 2);
2328                         goto set_mode_changed_ua;
2329                 }
2330                 break;
2331         case 0xa:      /* Control Mode page */
2332                 if (ctrl_m_pg[1] == arr[off + 1]) {
2333                         memcpy(ctrl_m_pg + 2, arr + off + 2,
2334                                sizeof(ctrl_m_pg) - 2);
2335                         if (ctrl_m_pg[4] & 0x8)
2336                                 sdebug_wp = true;
2337                         else
2338                                 sdebug_wp = false;
2339                         sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2340                         goto set_mode_changed_ua;
2341                 }
2342                 break;
2343         case 0x1c:      /* Informational Exceptions Mode page */
2344                 if (iec_m_pg[1] == arr[off + 1]) {
2345                         memcpy(iec_m_pg + 2, arr + off + 2,
2346                                sizeof(iec_m_pg) - 2);
2347                         goto set_mode_changed_ua;
2348                 }
2349                 break;
2350         default:
2351                 break;
2352         }
2353         mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2354         return check_condition_result;
2355 set_mode_changed_ua:
2356         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2357         return 0;
2358 }
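/*
 * Only the Caching (0x8), Control (0xa) and Informational Exceptions
 * (0x1c) pages are writable via MODE SELECT, and a successful change
 * raises a MODE PARAMETERS CHANGED unit attention on the device.
 */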
2359
2360 static int resp_temp_l_pg(unsigned char *arr)
2361 {
2362         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2363                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
2364                 };
2365
2366         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2367         return sizeof(temp_l_pg);
2368 }
2369
2370 static int resp_ie_l_pg(unsigned char *arr)
2371 {
2372         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2373                 };
2374
2375         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2376         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2377                 arr[4] = THRESHOLD_EXCEEDED;
2378                 arr[5] = 0xff;
2379         }
2380         return sizeof(ie_l_pg);
2381 }
2382
2383 #define SDEBUG_MAX_LSENSE_SZ 512
2384
2385 static int resp_log_sense(struct scsi_cmnd *scp,
2386                           struct sdebug_dev_info *devip)
2387 {
2388         int ppc, sp, pcode, subpcode, alloc_len, len, n;
2389         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2390         unsigned char *cmd = scp->cmnd;
2391
2392         memset(arr, 0, sizeof(arr));
2393         ppc = cmd[1] & 0x2;
2394         sp = cmd[1] & 0x1;
2395         if (ppc || sp) {
2396                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2397                 return check_condition_result;
2398         }
2399         pcode = cmd[2] & 0x3f;
2400         subpcode = cmd[3] & 0xff;
2401         alloc_len = get_unaligned_be16(cmd + 7);
2402         arr[0] = pcode;
2403         if (0 == subpcode) {
2404                 switch (pcode) {
2405                 case 0x0:       /* Supported log pages log page */
2406                         n = 4;
2407                         arr[n++] = 0x0;         /* this page */
2408                         arr[n++] = 0xd;         /* Temperature */
2409                         arr[n++] = 0x2f;        /* Informational exceptions */
2410                         arr[3] = n - 4;
2411                         break;
2412                 case 0xd:       /* Temperature log page */
2413                         arr[3] = resp_temp_l_pg(arr + 4);
2414                         break;
2415                 case 0x2f:      /* Informational exceptions log page */
2416                         arr[3] = resp_ie_l_pg(arr + 4);
2417                         break;
2418                 default:
2419                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2420                         return check_condition_result;
2421                 }
2422         } else if (0xff == subpcode) {
2423                 arr[0] |= 0x40;
2424                 arr[1] = subpcode;
2425                 switch (pcode) {
2426                 case 0x0:       /* Supported log pages and subpages log page */
2427                         n = 4;
2428                         arr[n++] = 0x0;
2429                         arr[n++] = 0x0;         /* 0,0 page */
2430                         arr[n++] = 0x0;
2431                         arr[n++] = 0xff;        /* this page */
2432                         arr[n++] = 0xd;
2433                         arr[n++] = 0x0;         /* Temperature */
2434                         arr[n++] = 0x2f;
2435                         arr[n++] = 0x0; /* Informational exceptions */
2436                         arr[3] = n - 4;
2437                         break;
2438                 case 0xd:       /* Temperature subpages */
2439                         n = 4;
2440                         arr[n++] = 0xd;
2441                         arr[n++] = 0x0;         /* Temperature */
2442                         arr[3] = n - 4;
2443                         break;
2444                 case 0x2f:      /* Informational exceptions subpages */
2445                         n = 4;
2446                         arr[n++] = 0x2f;
2447                         arr[n++] = 0x0;         /* Informational exceptions */
2448                         arr[3] = n - 4;
2449                         break;
2450                 default:
2451                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2452                         return check_condition_result;
2453                 }
2454         } else {
2455                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2456                 return check_condition_result;
2457         }
2458         len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2459         return fill_from_dev_buffer(scp, arr,
2460                     min(len, SDEBUG_MAX_LSENSE_SZ));
2461 }
2462
2463 static inline int check_device_access_params(struct scsi_cmnd *scp,
2464         unsigned long long lba, unsigned int num, bool write)
2465 {
2466         if (lba + num > sdebug_capacity) {
2467                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2468                 return check_condition_result;
2469         }
2470         /* transfer length excessive (tie in to block limits VPD page) */
2471         if (num > sdebug_store_sectors) {
2472                 /* needs work to find which cdb byte 'num' comes from */
2473                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2474                 return check_condition_result;
2475         }
2476         if (write && unlikely(sdebug_wp)) {
2477                 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2478                 return check_condition_result;
2479         }
2480         return 0;
2481 }
2482
2483 /* Returns number of bytes copied or -1 if error. */
2484 static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2485                             u32 num, bool do_write)
2486 {
2487         int ret;
2488         u64 block, rest = 0;
2489         struct scsi_data_buffer *sdb = &scmd->sdb;
2490         enum dma_data_direction dir;
2491
2492         if (do_write) {
2493                 dir = DMA_TO_DEVICE;
2494                 write_since_sync = true;
2495         } else {
2496                 dir = DMA_FROM_DEVICE;
2497         }
2498
2499         if (!sdb->length)
2500                 return 0;
2501         if (scmd->sc_data_direction != dir)
2502                 return -1;
2503
2504         block = do_div(lba, sdebug_store_sectors);
2505         if (block + num > sdebug_store_sectors)
2506                 rest = block + num - sdebug_store_sectors;
2507
2508         ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2509                    fake_storep + (block * sdebug_sector_size),
2510                    (num - rest) * sdebug_sector_size, sg_skip, do_write);
2511         if (ret != (num - rest) * sdebug_sector_size)
2512                 return ret;
2513
2514         if (rest) {
2515                 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2516                             fake_storep, rest * sdebug_sector_size,
2517                             sg_skip + ((num - rest) * sdebug_sector_size),
2518                             do_write);
2519         }
2520
2521         return ret;
2522 }
2523
2524 /* If lba2fake_store(lba,num) compares equal to the first num blocks of arr,
2525  * then copy the second half of arr (the new data) into lba2fake_store(lba,num)
2526  * and return true. If the comparison fails then return false. */
2527 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2528 {
2529         bool res;
2530         u64 block, rest = 0;
2531         u32 store_blks = sdebug_store_sectors;
2532         u32 lb_size = sdebug_sector_size;
2533
2534         block = do_div(lba, store_blks);
2535         if (block + num > store_blks)
2536                 rest = block + num - store_blks;
2537
2538         res = !memcmp(fake_storep + (block * lb_size), arr,
2539                       (num - rest) * lb_size);
2540         if (!res)
2541                 return res;
2542         if (rest)
2543                 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2544                               rest * lb_size);
2545         if (!res)
2546                 return res;
2547         arr += num * lb_size;
2548         memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2549         if (rest)
2550                 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2551                        rest * lb_size);
2552         return res;
2553 }
2554
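/* Compute the guard tag for one block of data: an IP checksum when
 * sdebug_guard is non-zero, otherwise the T10-DIF CRC. */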
2555 static __be16 dif_compute_csum(const void *buf, int len)
2556 {
2557         __be16 csum;
2558
2559         if (sdebug_guard)
2560                 csum = (__force __be16)ip_compute_csum(buf, len);
2561         else
2562                 csum = cpu_to_be16(crc_t10dif(buf, len));
2563
2564         return csum;
2565 }
2566
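/* Verify one protection tuple against the block data: check the guard tag
 * and, for type 1/2 protection, the reference tag. Returns 0 on success,
 * otherwise a value used as the ASCQ (0x1 guard check, 0x3 reference check). */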
2567 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2568                       sector_t sector, u32 ei_lba)
2569 {
2570         __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2571
2572         if (sdt->guard_tag != csum) {
2573                 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2574                         (unsigned long)sector,
2575                         be16_to_cpu(sdt->guard_tag),
2576                         be16_to_cpu(csum));
2577                 return 0x01;
2578         }
2579         if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2580             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2581                 pr_err("REF check failed on sector %lu\n",
2582                         (unsigned long)sector);
2583                 return 0x03;
2584         }
2585         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2586             be32_to_cpu(sdt->ref_tag) != ei_lba) {
2587                 pr_err("REF check failed on sector %lu\n",
2588                         (unsigned long)sector);
2589                 return 0x03;
2590         }
2591         return 0;
2592 }
2593
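/* Copy protection information between dif_storep and the command's
 * protection scatter-gather list, wrapping at the end of the store. */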
2594 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2595                           unsigned int sectors, bool read)
2596 {
2597         size_t resid;
2598         void *paddr;
2599         const void *dif_store_end = dif_storep + sdebug_store_sectors;
2600         struct sg_mapping_iter miter;
2601
2602         /* Bytes of protection data to copy into sgl */
2603         resid = sectors * sizeof(*dif_storep);
2604
2605         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2606                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2607                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2608
2609         while (sg_miter_next(&miter) && resid > 0) {
2610                 size_t len = min(miter.length, resid);
2611                 void *start = dif_store(sector);
2612                 size_t rest = 0;
2613
2614                 if (dif_store_end < start + len)
2615                         rest = start + len - dif_store_end;
2616
2617                 paddr = miter.addr;
2618
2619                 if (read)
2620                         memcpy(paddr, start, len - rest);
2621                 else
2622                         memcpy(start, paddr, len - rest);
2623
2624                 if (rest) {
2625                         if (read)
2626                                 memcpy(paddr + len - rest, dif_storep, rest);
2627                         else
2628                                 memcpy(dif_storep, paddr + len - rest, rest);
2629                 }
2630
2631                 sector += len / sizeof(*dif_storep);
2632                 resid -= len;
2633         }
2634         sg_miter_stop(&miter);
2635 }
2636
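/* Verify protection information for a read: check each sector's tuple
 * (unless its application tag is 0xffff) and, if all pass, copy the
 * protection data to the command's protection sgl. */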
2637 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2638                             unsigned int sectors, u32 ei_lba)
2639 {
2640         unsigned int i;
2641         struct t10_pi_tuple *sdt;
2642         sector_t sector;
2643
2644         for (i = 0; i < sectors; i++, ei_lba++) {
2645                 int ret;
2646
2647                 sector = start_sec + i;
2648                 sdt = dif_store(sector);
2649
2650                 if (sdt->app_tag == cpu_to_be16(0xffff))
2651                         continue;
2652
2653                 ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
2654                 if (ret) {
2655                         dif_errors++;
2656                         return ret;
2657                 }
2658         }
2659
2660         dif_copy_prot(SCpnt, start_sec, sectors, true);
2661         dix_reads++;
2662
2663         return 0;
2664 }
2665
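/* Handler for the READ(6/10/12/16/32) and XDWRITEREAD(10) data-in path:
 * decode the cdb, apply protection and fault-injection checks, then copy
 * data from the fake store into the command's scatter-gather list. */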
2666 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2667 {
2668         u8 *cmd = scp->cmnd;
2669         struct sdebug_queued_cmd *sqcp;
2670         u64 lba;
2671         u32 num;
2672         u32 ei_lba;
2673         unsigned long iflags;
2674         int ret;
2675         bool check_prot;
2676
2677         switch (cmd[0]) {
2678         case READ_16:
2679                 ei_lba = 0;
2680                 lba = get_unaligned_be64(cmd + 2);
2681                 num = get_unaligned_be32(cmd + 10);
2682                 check_prot = true;
2683                 break;
2684         case READ_10:
2685                 ei_lba = 0;
2686                 lba = get_unaligned_be32(cmd + 2);
2687                 num = get_unaligned_be16(cmd + 7);
2688                 check_prot = true;
2689                 break;
2690         case READ_6:
2691                 ei_lba = 0;
2692                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2693                       (u32)(cmd[1] & 0x1f) << 16;
2694                 num = (0 == cmd[4]) ? 256 : cmd[4];
2695                 check_prot = true;
2696                 break;
2697         case READ_12:
2698                 ei_lba = 0;
2699                 lba = get_unaligned_be32(cmd + 2);
2700                 num = get_unaligned_be32(cmd + 6);
2701                 check_prot = true;
2702                 break;
2703         case XDWRITEREAD_10:
2704                 ei_lba = 0;
2705                 lba = get_unaligned_be32(cmd + 2);
2706                 num = get_unaligned_be16(cmd + 7);
2707                 check_prot = false;
2708                 break;
2709         default:        /* assume READ(32) */
2710                 lba = get_unaligned_be64(cmd + 12);
2711                 ei_lba = get_unaligned_be32(cmd + 20);
2712                 num = get_unaligned_be32(cmd + 28);
2713                 check_prot = false;
2714                 break;
2715         }
2716         if (unlikely(have_dif_prot && check_prot)) {
2717                 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2718                     (cmd[1] & 0xe0)) {
2719                         mk_sense_invalid_opcode(scp);
2720                         return check_condition_result;
2721                 }
2722                 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2723                      sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2724                     (cmd[1] & 0xe0) == 0)
2725                         sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2726                                     "to DIF device\n");
2727         }
2728         if (unlikely(sdebug_any_injecting_opt)) {
2729                 sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2730
2731                 if (sqcp) {
2732                         if (sqcp->inj_short)
2733                                 num /= 2;
2734                 }
2735         } else
2736                 sqcp = NULL;
2737
2738         ret = check_device_access_params(scp, lba, num, false);
2739         if (ret)
2740                 return ret;
2741         if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2742                      (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
2743                      ((lba + num) > sdebug_medium_error_start))) {
2744                 /* claim unrecoverable read error */
2745                 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2746                 /* set info field and valid bit for fixed descriptor */
2747                 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2748                         scp->sense_buffer[0] |= 0x80;   /* Valid bit */
2749                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2750                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2751                         put_unaligned_be32(ret, scp->sense_buffer + 3);
2752                 }
2753                 scsi_set_resid(scp, scsi_bufflen(scp));
2754                 return check_condition_result;
2755         }
2756
2757         read_lock_irqsave(&atomic_rw, iflags);
2758
2759         /* DIX + T10 DIF */
2760         if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2761                 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2762
2763                 if (prot_ret) {
2764                         read_unlock_irqrestore(&atomic_rw, iflags);
2765                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2766                         return illegal_condition_result;
2767                 }
2768         }
2769
2770         ret = do_device_access(scp, 0, lba, num, false);
2771         read_unlock_irqrestore(&atomic_rw, iflags);
2772         if (unlikely(ret == -1))
2773                 return DID_ERROR << 16;
2774
2775         scsi_set_resid(scp, scsi_bufflen(scp) - ret);
2776
2777         if (unlikely(sqcp)) {
2778                 if (sqcp->inj_recovered) {
2779                         mk_sense_buffer(scp, RECOVERED_ERROR,
2780                                         THRESHOLD_EXCEEDED, 0);
2781                         return check_condition_result;
2782                 } else if (sqcp->inj_transport) {
2783                         mk_sense_buffer(scp, ABORTED_COMMAND,
2784                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
2785                         return check_condition_result;
2786                 } else if (sqcp->inj_dif) {
2787                         /* Logical block guard check failed */
2788                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2789                         return illegal_condition_result;
2790                 } else if (sqcp->inj_dix) {
2791                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2792                         return illegal_condition_result;
2793                 }
2794         }
2795         return 0;
2796 }
2797
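/* Hex/ASCII dump of one block of data, used when a protection check fails
 * during a write. */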
2798 static void dump_sector(unsigned char *buf, int len)
2799 {
2800         int i, j, n;
2801
2802         pr_err(">>> Sector Dump <<<\n");
2803         for (i = 0 ; i < len ; i += 16) {
2804                 char b[128];
2805
2806                 for (j = 0, n = 0; j < 16; j++) {
2807                         unsigned char c = buf[i+j];
2808
2809                         if (c >= 0x20 && c <= 0x7e)
2810                                 n += scnprintf(b + n, sizeof(b) - n,
2811                                                " %c ", c);
2812                         else
2813                                 n += scnprintf(b + n, sizeof(b) - n,
2814                                                "%02x ", c);
2815                 }
2816                 pr_err("%04d: %s\n", i, b);
2817         }
2818 }
2819
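/* Verify protection information supplied with a write: walk the data and
 * protection sgls in step, checking each tuple before the data is written,
 * then copy the protection data into dif_storep. */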
2820 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2821                              unsigned int sectors, u32 ei_lba)
2822 {
2823         int ret;
2824         struct t10_pi_tuple *sdt;
2825         void *daddr;
2826         sector_t sector = start_sec;
2827         int ppage_offset;
2828         int dpage_offset;
2829         struct sg_mapping_iter diter;
2830         struct sg_mapping_iter piter;
2831
2832         BUG_ON(scsi_sg_count(SCpnt) == 0);
2833         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2834
2835         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2836                         scsi_prot_sg_count(SCpnt),
2837                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2838         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2839                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2840
2841         /* For each protection page */
2842         while (sg_miter_next(&piter)) {
2843                 dpage_offset = 0;
2844                 if (WARN_ON(!sg_miter_next(&diter))) {
2845                         ret = 0x01;
2846                         goto out;
2847                 }
2848
2849                 for (ppage_offset = 0; ppage_offset < piter.length;
2850                      ppage_offset += sizeof(struct t10_pi_tuple)) {
2851                         /* If we're at the end of the current
2852                          * data page, advance to the next one
2853                          */
2854                         if (dpage_offset >= diter.length) {
2855                                 if (WARN_ON(!sg_miter_next(&diter))) {
2856                                         ret = 0x01;
2857                                         goto out;
2858                                 }
2859                                 dpage_offset = 0;
2860                         }
2861
2862                         sdt = piter.addr + ppage_offset;
2863                         daddr = diter.addr + dpage_offset;
2864
2865                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2866                         if (ret) {
2867                                 dump_sector(daddr, sdebug_sector_size);
2868                                 goto out;
2869                         }
2870
2871                         sector++;
2872                         ei_lba++;
2873                         dpage_offset += sdebug_sector_size;
2874                 }
2875                 diter.consumed = dpage_offset;
2876                 sg_miter_stop(&diter);
2877         }
2878         sg_miter_stop(&piter);
2879
2880         dif_copy_prot(SCpnt, start_sec, sectors, false);
2881         dix_writes++;
2882
2883         return 0;
2884
2885 out:
2886         dif_errors++;
2887         sg_miter_stop(&diter);
2888         sg_miter_stop(&piter);
2889         return ret;
2890 }
2891
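/* Helpers that translate between LBAs and bit positions in the logical
 * block provisioning map (map_storep), taking the configured unmap
 * granularity and alignment into account. */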
2892 static unsigned long lba_to_map_index(sector_t lba)
2893 {
2894         if (sdebug_unmap_alignment)
2895                 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2896         sector_div(lba, sdebug_unmap_granularity);
2897         return lba;
2898 }
2899
2900 static sector_t map_index_to_lba(unsigned long index)
2901 {
2902         sector_t lba = index * sdebug_unmap_granularity;
2903
2904         if (sdebug_unmap_alignment)
2905                 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2906         return lba;
2907 }
2908
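/* Report whether the given LBA is mapped and, via *num, how many blocks
 * starting at that LBA share the same state (clamped to the store size). */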
2909 static unsigned int map_state(sector_t lba, unsigned int *num)
2910 {
2911         sector_t end;
2912         unsigned int mapped;
2913         unsigned long index;
2914         unsigned long next;
2915
2916         index = lba_to_map_index(lba);
2917         mapped = test_bit(index, map_storep);
2918
2919         if (mapped)
2920                 next = find_next_zero_bit(map_storep, map_size, index);
2921         else
2922                 next = find_next_bit(map_storep, map_size, index);
2923
2924         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2925         *num = end - lba;
2926         return mapped;
2927 }
2928
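/* Mark every provisioning-map granule touched by [lba, lba + len) as mapped. */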
2929 static void map_region(sector_t lba, unsigned int len)
2930 {
2931         sector_t end = lba + len;
2932
2933         while (lba < end) {
2934                 unsigned long index = lba_to_map_index(lba);
2935
2936                 if (index < map_size)
2937                         set_bit(index, map_storep);
2938
2939                 lba = map_index_to_lba(index + 1);
2940         }
2941 }
2942
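/* Clear provisioning-map granules fully covered by [lba, lba + len); when
 * LBPRZ is configured, also fill the backing store (and any protection
 * store) so that subsequent reads return zeroes or 0xff bytes. */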
2943 static void unmap_region(sector_t lba, unsigned int len)
2944 {
2945         sector_t end = lba + len;
2946
2947         while (lba < end) {
2948                 unsigned long index = lba_to_map_index(lba);
2949
2950                 if (lba == map_index_to_lba(index) &&
2951                     lba + sdebug_unmap_granularity <= end &&
2952                     index < map_size) {
2953                         clear_bit(index, map_storep);
2954                         if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff bytes */
2955                                 memset(fake_storep +
2956                                        lba * sdebug_sector_size,
2957                                        (sdebug_lbprz & 1) ? 0 : 0xff,
2958                                        sdebug_sector_size *
2959                                        sdebug_unmap_granularity);
2960                         }
2961                         if (dif_storep) {
2962                                 memset(dif_storep + lba, 0xff,
2963                                        sizeof(*dif_storep) *
2964                                        sdebug_unmap_granularity);
2965                         }
2966                 }
2967                 lba = map_index_to_lba(index + 1);
2968         }
2969 }
2970
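/* Handler for the WRITE(6/10/12/16/32) and XDWRITEREAD(10) data-out path:
 * decode the cdb, verify any supplied protection information, copy the
 * data into the fake store and update the provisioning map. */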
2971 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2972 {
2973         u8 *cmd = scp->cmnd;
2974         u64 lba;
2975         u32 num;
2976         u32 ei_lba;
2977         unsigned long iflags;
2978         int ret;
2979         bool check_prot;
2980
2981         switch (cmd[0]) {
2982         case WRITE_16:
2983                 ei_lba = 0;
2984                 lba = get_unaligned_be64(cmd + 2);
2985                 num = get_unaligned_be32(cmd + 10);
2986                 check_prot = true;
2987                 break;
2988         case WRITE_10:
2989                 ei_lba = 0;
2990                 lba = get_unaligned_be32(cmd + 2);
2991                 num = get_unaligned_be16(cmd + 7);
2992                 check_prot = true;
2993                 break;
2994         case WRITE_6:
2995                 ei_lba = 0;
2996                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2997                       (u32)(cmd[1] & 0x1f) << 16;
2998                 num = (0 == cmd[4]) ? 256 : cmd[4];
2999                 check_prot = true;
3000                 break;
3001         case WRITE_12:
3002                 ei_lba = 0;
3003                 lba = get_unaligned_be32(cmd + 2);
3004                 num = get_unaligned_be32(cmd + 6);
3005                 check_prot = true;
3006                 break;
3007         case XDWRITEREAD_10:
3008                 ei_lba = 0;
3009                 lba = get_unaligned_be32(cmd + 2);
3010                 num = get_unaligned_be16(cmd + 7);
3011                 check_prot = false;
3012                 break;
3013         default:        /* assume WRITE(32) */
3014                 lba = get_unaligned_be64(cmd + 12);
3015                 ei_lba = get_unaligned_be32(cmd + 20);
3016                 num = get_unaligned_be32(cmd + 28);
3017                 check_prot = false;
3018                 break;
3019         }
3020         if (unlikely(have_dif_prot && check_prot)) {
3021                 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3022                     (cmd[1] & 0xe0)) {
3023                         mk_sense_invalid_opcode(scp);
3024                         return check_condition_result;
3025                 }
3026                 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3027                      sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3028                     (cmd[1] & 0xe0) == 0)
3029                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3030                                     "to DIF device\n");
3031         }
3032         ret = check_device_access_params(scp, lba, num, true);
3033         if (ret)
3034                 return ret;
3035         write_lock_irqsave(&atomic_rw, iflags);
3036
3037         /* DIX + T10 DIF */
3038         if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3039                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3040
3041                 if (prot_ret) {
3042                         write_unlock_irqrestore(&atomic_rw, iflags);
3043                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3044                         return illegal_condition_result;
3045                 }
3046         }
3047
3048         ret = do_device_access(scp, 0, lba, num, true);
3049         if (unlikely(scsi_debug_lbp()))
3050                 map_region(lba, num);
3051         write_unlock_irqrestore(&atomic_rw, iflags);
3052         if (unlikely(-1 == ret))
3053                 return DID_ERROR << 16;
3054         else if (unlikely(sdebug_verbose &&
3055                           (ret < (num * sdebug_sector_size))))
3056                 sdev_printk(KERN_INFO, scp->device,
3057                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3058                             my_name, num * sdebug_sector_size, ret);
3059
3060         if (unlikely(sdebug_any_injecting_opt)) {
3061                 struct sdebug_queued_cmd *sqcp =
3062                                 (struct sdebug_queued_cmd *)scp->host_scribble;
3063
3064                 if (sqcp) {
3065                         if (sqcp->inj_recovered) {
3066                                 mk_sense_buffer(scp, RECOVERED_ERROR,
3067                                                 THRESHOLD_EXCEEDED, 0);
3068                                 return check_condition_result;
3069                         } else if (sqcp->inj_dif) {
3070                                 /* Logical block guard check failed */
3071                                 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3072                                 return illegal_condition_result;
3073                         } else if (sqcp->inj_dix) {
3074                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3075                                 return illegal_condition_result;
3076                         }
3077                 }
3078         }
3079         return 0;
3080 }
3081
3082 /*
3083  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3084  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3085  */
3086 static int resp_write_scat(struct scsi_cmnd *scp,
3087                            struct sdebug_dev_info *devip)
3088 {
3089         u8 *cmd = scp->cmnd;
3090         u8 *lrdp = NULL;
3091         u8 *up;
3092         u8 wrprotect;
3093         u16 lbdof, num_lrd, k;
3094         u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3095         u32 lb_size = sdebug_sector_size;
3096         u32 ei_lba;
3097         u64 lba;
3098         unsigned long iflags;
3099         int ret, res;
3100         bool is_16;
3101         static const u32 lrd_size = 32; /* LBA range descriptor size; the header is also 32 bytes */
3102
3103         if (cmd[0] == VARIABLE_LENGTH_CMD) {
3104                 is_16 = false;
3105                 wrprotect = (cmd[10] >> 5) & 0x7;
3106                 lbdof = get_unaligned_be16(cmd + 12);
3107                 num_lrd = get_unaligned_be16(cmd + 16);
3108                 bt_len = get_unaligned_be32(cmd + 28);
3109         } else {        /* that leaves WRITE SCATTERED(16) */
3110                 is_16 = true;
3111                 wrprotect = (cmd[2] >> 5) & 0x7;
3112                 lbdof = get_unaligned_be16(cmd + 4);
3113                 num_lrd = get_unaligned_be16(cmd + 8);
3114                 bt_len = get_unaligned_be32(cmd + 10);
3115                 if (unlikely(have_dif_prot)) {
3116                         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3117                             wrprotect) {
3118                                 mk_sense_invalid_opcode(scp);
3119                                 return illegal_condition_result;
3120                         }
3121                         if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3122                              sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3123                              wrprotect == 0)
3124                                 sdev_printk(KERN_ERR, scp->device,
3125                                             "Unprotected WR to DIF device\n");
3126                 }
3127         }
3128         if ((num_lrd == 0) || (bt_len == 0))
3129                 return 0;       /* T10 says these do-nothings are not errors */
3130         if (lbdof == 0) {
3131                 if (sdebug_verbose)
3132                         sdev_printk(KERN_INFO, scp->device,
3133                                 "%s: %s: LB Data Offset field bad\n",
3134                                 my_name, __func__);
3135                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3136                 return illegal_condition_result;
3137         }
3138         lbdof_blen = lbdof * lb_size;
3139         if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3140                 if (sdebug_verbose)
3141                         sdev_printk(KERN_INFO, scp->device,
3142                                 "%s: %s: LBA range descriptors don't fit\n",
3143                                 my_name, __func__);
3144                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3145                 return illegal_condition_result;
3146         }
3147         lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3148         if (lrdp == NULL)
3149                 return SCSI_MLQUEUE_HOST_BUSY;
3150         if (sdebug_verbose)
3151                 sdev_printk(KERN_INFO, scp->device,
3152                         "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3153                         my_name, __func__, lbdof_blen);
3154         res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3155         if (res == -1) {
3156                 ret = DID_ERROR << 16;
3157                 goto err_out;
3158         }
3159
3160         write_lock_irqsave(&atomic_rw, iflags);
3161         sg_off = lbdof_blen;
3162         /* Spec says the Buffer Transfer Length field counts logical blocks in the data-out buffer */
3163         cum_lb = 0;
3164         for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3165                 lba = get_unaligned_be64(up + 0);
3166                 num = get_unaligned_be32(up + 8);
3167                 if (sdebug_verbose)
3168                         sdev_printk(KERN_INFO, scp->device,
3169                                 "%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3170                                 my_name, __func__, k, lba, num, sg_off);
3171                 if (num == 0)
3172                         continue;
3173                 ret = check_device_access_params(scp, lba, num, true);
3174                 if (ret)
3175                         goto err_out_unlock;
3176                 num_by = num * lb_size;
3177                 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3178
3179                 if ((cum_lb + num) > bt_len) {
3180                         if (sdebug_verbose)
3181                                 sdev_printk(KERN_INFO, scp->device,
3182                                     "%s: %s: sum of blocks > data provided\n",
3183                                     my_name, __func__);
3184                         mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3185                                         0);
3186                         ret = illegal_condition_result;
3187                         goto err_out_unlock;
3188                 }
3189
3190                 /* DIX + T10 DIF */
3191                 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3192                         int prot_ret = prot_verify_write(scp, lba, num,
3193                                                          ei_lba);
3194
3195                         if (prot_ret) {
3196                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3197                                                 prot_ret);
3198                                 ret = illegal_condition_result;
3199                                 goto err_out_unlock;
3200                         }
3201                 }
3202
3203                 ret = do_device_access(scp, sg_off, lba, num, true);
3204                 if (unlikely(scsi_debug_lbp()))
3205                         map_region(lba, num);
3206                 if (unlikely(-1 == ret)) {
3207                         ret = DID_ERROR << 16;
3208                         goto err_out_unlock;
3209                 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3210                         sdev_printk(KERN_INFO, scp->device,
3211                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3212                             my_name, num_by, ret);
3213
3214                 if (unlikely(sdebug_any_injecting_opt)) {
3215                         struct sdebug_queued_cmd *sqcp =
3216                                 (struct sdebug_queued_cmd *)scp->host_scribble;
3217
3218                         if (sqcp) {
3219                                 if (sqcp->inj_recovered) {
3220                                         mk_sense_buffer(scp, RECOVERED_ERROR,
3221                                                         THRESHOLD_EXCEEDED, 0);
3222                                         ret = illegal_condition_result;
3223                                         goto err_out_unlock;
3224                                 } else if (sqcp->inj_dif) {
3225                                         /* Logical block guard check failed */
3226                                         mk_sense_buffer(scp, ABORTED_COMMAND,
3227                                                         0x10, 1);
3228                                         ret = illegal_condition_result;
3229                                         goto err_out_unlock;
3230                                 } else if (sqcp->inj_dix) {
3231                                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
3232                                                         0x10, 1);
3233                                         ret = illegal_condition_result;
3234                                         goto err_out_unlock;
3235                                 }
3236                         }
3237                 }
3238                 sg_off += num_by;
3239                 cum_lb += num;
3240         }
3241         ret = 0;
3242 err_out_unlock:
3243         write_unlock_irqrestore(&atomic_rw, iflags);
3244 err_out:
3245         kfree(lrdp);
3246         return ret;
3247 }
3248
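/* Common WRITE SAME worker: either unmap the range, or replicate a single
 * logical block (zeroes when NDOB is set, otherwise fetched from the
 * data-out buffer) across [lba, lba + num). */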
3249 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3250                            u32 ei_lba, bool unmap, bool ndob)
3251 {
3252         int ret;
3253         unsigned long iflags;
3254         unsigned long long i;
3255         u32 lb_size = sdebug_sector_size;
3256         u64 block, lbaa;
3257         u8 *fs1p;
3258
3259         ret = check_device_access_params(scp, lba, num, true);
3260         if (ret)
3261                 return ret;
3262
3263         write_lock_irqsave(&atomic_rw, iflags);
3264
3265         if (unmap && scsi_debug_lbp()) {
3266                 unmap_region(lba, num);
3267                 goto out;
3268         }
3269         lbaa = lba;
3270         block = do_div(lbaa, sdebug_store_sectors);
3271         /* if ndob then zero 1 logical block, else fetch 1 logical block */
3272         fs1p = fake_storep + (block * lb_size);
3273         if (ndob) {
3274                 memset(fs1p, 0, lb_size);
3275                 ret = 0;
3276         } else
3277                 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3278
3279         if (-1 == ret) {
3280                 write_unlock_irqrestore(&atomic_rw, iflags);
3281                 return DID_ERROR << 16;
3282         } else if (sdebug_verbose && !ndob && (ret < lb_size))
3283                 sdev_printk(KERN_INFO, scp->device,
3284                             "%s: %s: lb size=%u, IO sent=%d bytes\n",
3285                             my_name, "write same", lb_size, ret);
3286
3287         /* Copy first sector to remaining blocks */
3288         for (i = 1 ; i < num ; i++) {
3289                 lbaa = lba + i;
3290                 block = do_div(lbaa, sdebug_store_sectors);
3291                 memmove(fake_storep + (block * lb_size), fs1p, lb_size);
3292         }
3293         if (scsi_debug_lbp())
3294                 map_region(lba, num);
3295 out:
3296         write_unlock_irqrestore(&atomic_rw, iflags);
3297
3298         return 0;
3299 }
3300
3301 static int resp_write_same_10(struct scsi_cmnd *scp,
3302                               struct sdebug_dev_info *devip)
3303 {
3304         u8 *cmd = scp->cmnd;
3305         u32 lba;
3306         u16 num;
3307         u32 ei_lba = 0;
3308         bool unmap = false;
3309
3310         if (cmd[1] & 0x8) {
3311                 if (sdebug_lbpws10 == 0) {
3312                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3313                         return check_condition_result;
3314                 } else
3315                         unmap = true;
3316         }
3317         lba = get_unaligned_be32(cmd + 2);
3318         num = get_unaligned_be16(cmd + 7);
3319         if (num > sdebug_write_same_length) {
3320                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3321                 return check_condition_result;
3322         }
3323         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3324 }
3325
3326 static int resp_write_same_16(struct scsi_cmnd *scp,
3327                               struct sdebug_dev_info *devip)
3328 {
3329         u8 *cmd = scp->cmnd;
3330         u64 lba;
3331         u32 num;
3332         u32 ei_lba = 0;
3333         bool unmap = false;
3334         bool ndob = false;
3335
3336         if (cmd[1] & 0x8) {     /* UNMAP */
3337                 if (sdebug_lbpws == 0) {
3338                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3339                         return check_condition_result;
3340                 } else
3341                         unmap = true;
3342         }
3343         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3344                 ndob = true;
3345         lba = get_unaligned_be64(cmd + 2);
3346         num = get_unaligned_be32(cmd + 10);
3347         if (num > sdebug_write_same_length) {
3348                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3349                 return check_condition_result;
3350         }
3351         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3352 }
3353
3354 /* Note the mode field is in the same position as the (lower) service action
3355  * field. For the Report supported operation codes command, SPC-4 suggests
3356  * each mode of this command should be reported separately; that is left for future work. */
3357 static int resp_write_buffer(struct scsi_cmnd *scp,
3358                              struct sdebug_dev_info *devip)
3359 {
3360         u8 *cmd = scp->cmnd;
3361         struct scsi_device *sdp = scp->device;
3362         struct sdebug_dev_info *dp;
3363         u8 mode;
3364
3365         mode = cmd[1] & 0x1f;
3366         switch (mode) {
3367         case 0x4:       /* download microcode (MC) and activate (ACT) */
3368                 /* set UAs on this device only */
3369                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3370                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3371                 break;
3372         case 0x5:       /* download MC, save and ACT */
3373                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3374                 break;
3375         case 0x6:       /* download MC with offsets and ACT */
3376                 /* set UAs on most devices (LUs) in this target */
3377                 list_for_each_entry(dp,
3378                                     &devip->sdbg_host->dev_info_list,
3379                                     dev_list)
3380                         if (dp->target == sdp->id) {
3381                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3382                                 if (devip != dp)
3383                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3384                                                 dp->uas_bm);
3385                         }
3386                 break;
3387         case 0x7:       /* download MC with offsets, save, and ACT */
3388                 /* set UA on all devices (LUs) in this target */
3389                 list_for_each_entry(dp,
3390                                     &devip->sdbg_host->dev_info_list,
3391                                     dev_list)
3392                         if (dp->target == sdp->id)
3393                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3394                                         dp->uas_bm);
3395                 break;
3396         default:
3397                 /* do nothing for this command for other mode values */
3398                 break;
3399         }
3400         return 0;
3401 }
3402
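/* COMPARE AND WRITE(16): fetch 2 * num blocks of compare + write data from
 * the data-out buffer, and only write if the compare half matches the
 * current contents of the fake store. */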
3403 static int resp_comp_write(struct scsi_cmnd *scp,
3404                            struct sdebug_dev_info *devip)
3405 {
3406         u8 *cmd = scp->cmnd;
3407         u8 *arr;
3408         u8 *fake_storep_hold;
3409         u64 lba;
3410         u32 dnum;
3411         u32 lb_size = sdebug_sector_size;
3412         u8 num;
3413         unsigned long iflags;
3414         int ret;
3415         int retval = 0;
3416
3417         lba = get_unaligned_be64(cmd + 2);
3418         num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3419         if (0 == num)
3420                 return 0;       /* degenerate case, not an error */
3421         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3422             (cmd[1] & 0xe0)) {
3423                 mk_sense_invalid_opcode(scp);
3424                 return check_condition_result;
3425         }
3426         if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3427              sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3428             (cmd[1] & 0xe0) == 0)
3429                 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3430                             "to DIF device\n");
3431         ret = check_device_access_params(scp, lba, num, false);
3432         if (ret)
3433                 return ret;
3434         dnum = 2 * num;
3435         arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3436         if (NULL == arr) {
3437                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3438                                 INSUFF_RES_ASCQ);
3439                 return check_condition_result;
3440         }
3441
3442         write_lock_irqsave(&atomic_rw, iflags);
3443
3444         /* trick do_device_access() to fetch both compare and write buffers
3445          * from the data-out buffer into arr. Safe (atomic) since write_lock held. */
3446         fake_storep_hold = fake_storep;
3447         fake_storep = arr;
3448         ret = do_device_access(scp, 0, 0, dnum, true);
3449         fake_storep = fake_storep_hold;
3450         if (ret == -1) {
3451                 retval = DID_ERROR << 16;
3452                 goto cleanup;
3453         } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3454                 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3455                             "indicated=%u, IO sent=%d bytes\n", my_name,
3456                             dnum * lb_size, ret);
3457         if (!comp_write_worker(lba, num, arr)) {
3458                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3459                 retval = check_condition_result;
3460                 goto cleanup;
3461         }
3462         if (scsi_debug_lbp())
3463                 map_region(lba, num);
3464 cleanup:
3465         write_unlock_irqrestore(&atomic_rw, iflags);
3466         kfree(arr);
3467         return retval;
3468 }
3469
3470 struct unmap_block_desc {
3471         __be64  lba;
3472         __be32  blocks;
3473         __be32  __reserved;
3474 };
3475
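/* UNMAP: parse the block descriptor list from the data-out buffer and
 * deallocate each described LBA range. */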
3476 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3477 {
3478         unsigned char *buf;
3479         struct unmap_block_desc *desc;
3480         unsigned int i, payload_len, descriptors;
3481         int ret;
3482         unsigned long iflags;
3483
3484
3485         if (!scsi_debug_lbp())
3486                 return 0;       /* fib and say it's done */
3487         payload_len = get_unaligned_be16(scp->cmnd + 7);
3488         BUG_ON(scsi_bufflen(scp) != payload_len);
3489
3490         descriptors = (payload_len - 8) / 16;
3491         if (descriptors > sdebug_unmap_max_desc) {
3492                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3493                 return check_condition_result;
3494         }
3495
3496         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3497         if (!buf) {
3498                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3499                                 INSUFF_RES_ASCQ);
3500                 return check_condition_result;
3501         }
3502
3503         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3504
3505         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3506         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3507
3508         desc = (void *)&buf[8];
3509
3510         write_lock_irqsave(&atomic_rw, iflags);
3511
3512         for (i = 0 ; i < descriptors ; i++) {
3513                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3514                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3515
3516                 ret = check_device_access_params(scp, lba, num, true);
3517                 if (ret)
3518                         goto out;
3519
3520                 unmap_region(lba, num);
3521         }
3522
3523         ret = 0;
3524
3525 out:
3526         write_unlock_irqrestore(&atomic_rw, iflags);
3527         kfree(buf);
3528
3529         return ret;
3530 }
3531
3532 #define SDEBUG_GET_LBA_STATUS_LEN 32
3533
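/* GET LBA STATUS: report whether the starting LBA is mapped or deallocated
 * and the length of the run of blocks sharing that state. */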
3534 static int resp_get_lba_status(struct scsi_cmnd *scp,
3535                                struct sdebug_dev_info *devip)
3536 {
3537         u8 *cmd = scp->cmnd;
3538         u64 lba;
3539         u32 alloc_len, mapped, num;
3540         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3541         int ret;
3542
3543         lba = get_unaligned_be64(cmd + 2);
3544         alloc_len = get_unaligned_be32(cmd + 10);
3545
3546         if (alloc_len < 24)
3547                 return 0;
3548
3549         ret = check_device_access_params(scp, lba, 1, false);
3550         if (ret)
3551                 return ret;
3552
3553         if (scsi_debug_lbp())
3554                 mapped = map_state(lba, &num);
3555         else {
3556                 mapped = 1;
3557                 /* following just in case virtual_gb changed */
3558                 sdebug_capacity = get_sdebug_capacity();
3559                 if (sdebug_capacity - lba <= 0xffffffff)
3560                         num = sdebug_capacity - lba;
3561                 else
3562                         num = 0xffffffff;
3563         }
3564
3565         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3566         put_unaligned_be32(20, arr);            /* Parameter Data Length */
3567         put_unaligned_be64(lba, arr + 8);       /* LBA */
3568         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3569         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3570
3571         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3572 }
3573
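/* SYNCHRONIZE CACHE(10/16): range-check the cdb; completion is reported
 * immediately unless there has been a write since the last sync and the
 * IMMED bit is clear. */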
3574 static int resp_sync_cache(struct scsi_cmnd *scp,
3575                            struct sdebug_dev_info *devip)
3576 {
3577         int res = 0;
3578         u64 lba;
3579         u32 num_blocks;
3580         u8 *cmd = scp->cmnd;
3581
3582         if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
3583                 lba = get_unaligned_be32(cmd + 2);
3584                 num_blocks = get_unaligned_be16(cmd + 7);
3585         } else {                                /* SYNCHRONIZE_CACHE(16) */
3586                 lba = get_unaligned_be64(cmd + 2);
3587                 num_blocks = get_unaligned_be32(cmd + 10);
3588         }
3589         if (lba + num_blocks > sdebug_capacity) {
3590                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3591                 return check_condition_result;
3592         }
3593         if (!write_since_sync || cmd[1] & 0x2)
3594                 res = SDEG_RES_IMMED_MASK;
3595         else            /* delay if write_since_sync and IMMED clear */
3596                 write_since_sync = false;
3597         return res;
3598 }
3599
3600 #define RL_BUCKET_ELEMS 8
3601
3602 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3603  * (W-LUN), the normal Linux scanning logic does not associate it with a
3604  * device (e.g. /dev/sg7). The following magic will make that association:
3605  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3606  * where <n> is a host number. If there are multiple targets in a host then
3607  * the above will associate a W-LUN with each target. To get a W-LUN only
3608  * for target 2, use "echo '- 2 49409' > scan".
3609  */
3610 static int resp_report_luns(struct scsi_cmnd *scp,
3611                             struct sdebug_dev_info *devip)
3612 {
3613         unsigned char *cmd = scp->cmnd;
3614         unsigned int alloc_len;
3615         unsigned char select_report;
3616         u64 lun;
3617         struct scsi_lun *lun_p;
3618         u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
3619         unsigned int lun_cnt;   /* normal LUN count (max: 256) */
3620         unsigned int wlun_cnt;  /* report luns W-LUN count */
3621         unsigned int tlun_cnt;  /* total LUN count */
3622         unsigned int rlen;      /* response length (in bytes) */
3623         int k, j, n, res;
3624         unsigned int off_rsp = 0;
3625         const int sz_lun = sizeof(struct scsi_lun);
3626
3627         clear_luns_changed_on_target(devip);
3628
3629         select_report = cmd[2];
3630         alloc_len = get_unaligned_be32(cmd + 6);
3631
3632         if (alloc_len < 4) {
3633                 pr_err("alloc len too small %d\n", alloc_len);
3634                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3635                 return check_condition_result;
3636         }
3637
3638         switch (select_report) {
3639         case 0:         /* all LUNs apart from W-LUNs */
3640                 lun_cnt = sdebug_max_luns;
3641                 wlun_cnt = 0;
3642                 break;
3643         case 1:         /* only W-LUNs */
3644                 lun_cnt = 0;
3645                 wlun_cnt = 1;
3646                 break;
3647         case 2:         /* all LUNs */
3648                 lun_cnt = sdebug_max_luns;
3649                 wlun_cnt = 1;
3650                 break;
3651         case 0x10:      /* only administrative LUs */
3652         case 0x11:      /* see SPC-5 */
3653         case 0x12:      /* only subsidiary LUs owned by referenced LU */
3654         default:
3655                 pr_debug("select report invalid %d\n", select_report);
3656                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3657                 return check_condition_result;
3658         }
3659
3660         if (sdebug_no_lun_0 && (lun_cnt > 0))
3661                 --lun_cnt;
3662
3663         tlun_cnt = lun_cnt + wlun_cnt;
3664         rlen = tlun_cnt * sz_lun;       /* excluding 8 byte header */
3665         scsi_set_resid(scp, scsi_bufflen(scp));
3666         pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3667                  select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3668
3669         /* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
3670         lun = sdebug_no_lun_0 ? 1 : 0;
3671         for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
3672                 memset(arr, 0, sizeof(arr));
3673                 lun_p = (struct scsi_lun *)&arr[0];
3674                 if (k == 0) {
3675                         put_unaligned_be32(rlen, &arr[0]);
3676                         ++lun_p;
3677                         j = 1;
3678                 }
3679                 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
3680                         if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
3681                                 break;
3682                         int_to_scsilun(lun++, lun_p);
3683                 }
3684                 if (j < RL_BUCKET_ELEMS)
3685                         break;
3686                 n = j * sz_lun;
3687                 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
3688                 if (res)
3689                         return res;
3690                 off_rsp += n;
3691         }
3692         if (wlun_cnt) {
3693                 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
3694                 ++j;
3695         }
3696         if (j > 0)
3697                 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
3698         return res;
3699 }
3700
3701 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3702 {
3703         u32 tag = blk_mq_unique_tag(cmnd->request);
3704         u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3705
3706         pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3707         if (WARN_ON_ONCE(hwq >= submit_queues))
3708                 hwq = 0;
3709         return sdebug_q_arr + hwq;
3710 }
3711
3712 /* Queued (deferred) command completions converge here. */
3713 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3714 {
3715         bool aborted = sd_dp->aborted;
3716         int qc_idx;
3717         int retiring = 0;
3718         unsigned long iflags;
3719         struct sdebug_queue *sqp;
3720         struct sdebug_queued_cmd *sqcp;
3721         struct scsi_cmnd *scp;
3722         struct sdebug_dev_info *devip;
3723
3724         sd_dp->defer_t = SDEB_DEFER_NONE;
3725         if (unlikely(aborted))
3726                 sd_dp->aborted = false;
3727         qc_idx = sd_dp->qc_idx;
3728         sqp = sdebug_q_arr + sd_dp->sqa_idx;
3729         if (sdebug_statistics) {
3730                 atomic_inc(&sdebug_completions);
3731                 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3732                         atomic_inc(&sdebug_miss_cpus);
3733         }
3734         if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3735                 pr_err("wild qc_idx=%d\n", qc_idx);
3736                 return;
3737         }
3738         spin_lock_irqsave(&sqp->qc_lock, iflags);
3739         sqcp = &sqp->qc_arr[qc_idx];
3740         scp = sqcp->a_cmnd;
3741         if (unlikely(scp == NULL)) {
3742                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3743                 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3744                        sd_dp->sqa_idx, qc_idx);
3745                 return;
3746         }
3747         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3748         if (likely(devip))
3749                 atomic_dec(&devip->num_in_q);
3750         else
3751                 pr_err("devip=NULL\n");
3752         if (unlikely(atomic_read(&retired_max_queue) > 0))
3753                 retiring = 1;
3754
3755         sqcp->a_cmnd = NULL;
3756         if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3757                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3758                 pr_err("Unexpected completion\n");
3759                 return;
3760         }
3761
3762         if (unlikely(retiring)) {       /* user has reduced max_queue */
3763                 int k, retval;
3764
3765                 retval = atomic_read(&retired_max_queue);
3766                 if (qc_idx >= retval) {
3767                         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3768                         pr_err("index %d too large\n", retval);
3769                         return;
3770                 }
3771                 k = find_last_bit(sqp->in_use_bm, retval);
3772                 if ((k < sdebug_max_queue) || (k == retval))
3773                         atomic_set(&retired_max_queue, 0);
3774                 else
3775                         atomic_set(&retired_max_queue, k + 1);
3776         }
3777         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3778         if (unlikely(aborted)) {
3779                 if (sdebug_verbose)
3780                         pr_info("bypassing scsi_done() due to aborted cmd\n");
3781                 return;
3782         }
3783         scp->scsi_done(scp); /* callback to mid level */
3784 }
3785
3786 /* When high resolution timer goes off this function is called. */
3787 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3788 {
3789         struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3790                                                   hrt);
3791         sdebug_q_cmd_complete(sd_dp);
3792         return HRTIMER_NORESTART;
3793 }
3794
3795 /* When work queue schedules work, it calls this function. */
3796 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3797 {
3798         struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3799                                                   ew.work);
3800         sdebug_q_cmd_complete(sd_dp);
3801 }
3802
3803 static bool got_shared_uuid;
3804 static uuid_t shared_uuid;
3805
3806 static struct sdebug_dev_info *sdebug_device_create(
3807                         struct sdebug_host_info *sdbg_host, gfp_t flags)
3808 {
3809         struct sdebug_dev_info *devip;
3810
3811         devip = kzalloc(sizeof(*devip), flags);
3812         if (devip) {
3813                 if (sdebug_uuid_ctl == 1)
3814                         uuid_gen(&devip->lu_name);
3815                 else if (sdebug_uuid_ctl == 2) {
3816                         if (got_shared_uuid)
3817                                 devip->lu_name = shared_uuid;
3818                         else {
3819                                 uuid_gen(&shared_uuid);
3820                                 got_shared_uuid = true;
3821                                 devip->lu_name = shared_uuid;
3822                         }
3823                 }
3824                 devip->sdbg_host = sdbg_host;
3825                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3826         }
3827         return devip;
3828 }
3829
3830 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3831 {
3832         struct sdebug_host_info *sdbg_host;
3833         struct sdebug_dev_info *open_devip = NULL;
3834         struct sdebug_dev_info *devip;
3835
3836         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3837         if (!sdbg_host) {
3838                 pr_err("Host info NULL\n");
3839                 return NULL;
3840         }
3841         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3842                 if ((devip->used) && (devip->channel == sdev->channel) &&
3843                     (devip->target == sdev->id) &&
3844                     (devip->lun == sdev->lun))
3845                         return devip;
3846                 else {
3847                         if ((!devip->used) && (!open_devip))
3848                                 open_devip = devip;
3849                 }
3850         }
3851         if (!open_devip) { /* try and make a new one */
3852                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3853                 if (!open_devip) {
3854                         pr_err("out of memory at line %d\n", __LINE__);
3855                         return NULL;
3856                 }
3857         }
3858
3859         open_devip->channel = sdev->channel;
3860         open_devip->target = sdev->id;
3861         open_devip->lun = sdev->lun;
3862         open_devip->sdbg_host = sdbg_host;
3863         atomic_set(&open_devip->num_in_q, 0);
3864         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3865         open_devip->used = true;
3866         return open_devip;
3867 }
3868
3869 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3870 {
3871         if (sdebug_verbose)
3872                 pr_info("slave_alloc <%u %u %u %llu>\n",
3873                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3874         return 0;
3875 }
3876
3877 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3878 {
3879         struct sdebug_dev_info *devip =
3880                         (struct sdebug_dev_info *)sdp->hostdata;
3881
3882         if (sdebug_verbose)
3883                 pr_info("slave_configure <%u %u %u %llu>\n",
3884                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3885         if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3886                 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3887         if (devip == NULL) {
3888                 devip = find_build_dev_info(sdp);
3889                 if (devip == NULL)
3890                         return 1;  /* no resources, will be marked offline */
3891         }
3892         sdp->hostdata = devip;
3893         if (sdebug_no_uld)
3894                 sdp->no_uld_attach = 1;
3895         config_cdb_len(sdp);
3896         return 0;
3897 }
3898
3899 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3900 {
3901         struct sdebug_dev_info *devip =
3902                 (struct sdebug_dev_info *)sdp->hostdata;
3903
3904         if (sdebug_verbose)
3905                 pr_info("slave_destroy <%u %u %u %llu>\n",
3906                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3907         if (devip) {
3908                 /* make this slot available for re-use */
3909                 devip->used = false;
3910                 sdp->hostdata = NULL;
3911         }
3912 }
3913
3914 static void stop_qc_helper(struct sdebug_defer *sd_dp,
3915                            enum sdeb_defer_type defer_t)
3916 {
3917         if (!sd_dp)
3918                 return;
3919         if (defer_t == SDEB_DEFER_HRT)
3920                 hrtimer_cancel(&sd_dp->hrt);
3921         else if (defer_t == SDEB_DEFER_WQ)
3922                 cancel_work_sync(&sd_dp->ew.work);
3923 }
3924
3925 /* If @cmnd is found, deletes its timer or work queue and returns true;
3926    else returns false */
3927 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3928 {
3929         unsigned long iflags;
3930         int j, k, qmax, r_qmax;
3931         enum sdeb_defer_type l_defer_t;
3932         struct sdebug_queue *sqp;
3933         struct sdebug_queued_cmd *sqcp;
3934         struct sdebug_dev_info *devip;
3935         struct sdebug_defer *sd_dp;
3936
3937         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3938                 spin_lock_irqsave(&sqp->qc_lock, iflags);
3939                 qmax = sdebug_max_queue;
3940                 r_qmax = atomic_read(&retired_max_queue);
3941                 if (r_qmax > qmax)
3942                         qmax = r_qmax;
3943                 for (k = 0; k < qmax; ++k) {
3944                         if (test_bit(k, sqp->in_use_bm)) {
3945                                 sqcp = &sqp->qc_arr[k];
3946                                 if (cmnd != sqcp->a_cmnd)
3947                                         continue;
3948                                 /* found */
3949                                 devip = (struct sdebug_dev_info *)
3950                                                 cmnd->device->hostdata;
3951                                 if (devip)
3952                                         atomic_dec(&devip->num_in_q);
3953                                 sqcp->a_cmnd = NULL;
3954                                 sd_dp = sqcp->sd_dp;
3955                                 if (sd_dp) {
3956                                         l_defer_t = sd_dp->defer_t;
3957                                         sd_dp->defer_t = SDEB_DEFER_NONE;
3958                                 } else
3959                                         l_defer_t = SDEB_DEFER_NONE;
3960                                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3961                                 stop_qc_helper(sd_dp, l_defer_t);
3962                                 clear_bit(k, sqp->in_use_bm);
3963                                 return true;
3964                         }
3965                 }
3966                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3967         }
3968         return false;
3969 }
3970
3971 /* Deletes (stops) timers or work queues of all queued commands */
3972 static void stop_all_queued(void)
3973 {
3974         unsigned long iflags;
3975         int j, k;
3976         enum sdeb_defer_type l_defer_t;
3977         struct sdebug_queue *sqp;
3978         struct sdebug_queued_cmd *sqcp;
3979         struct sdebug_dev_info *devip;
3980         struct sdebug_defer *sd_dp;
3981
3982         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3983                 spin_lock_irqsave(&sqp->qc_lock, iflags);
3984                 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3985                         if (test_bit(k, sqp->in_use_bm)) {
3986                                 sqcp = &sqp->qc_arr[k];
3987                                 if (sqcp->a_cmnd == NULL)
3988                                         continue;
3989                                 devip = (struct sdebug_dev_info *)
3990                                         sqcp->a_cmnd->device->hostdata;
3991                                 if (devip)
3992                                         atomic_dec(&devip->num_in_q);
3993                                 sqcp->a_cmnd = NULL;
3994                                 sd_dp = sqcp->sd_dp;
3995                                 if (sd_dp) {
3996                                         l_defer_t = sd_dp->defer_t;
3997                                         sd_dp->defer_t = SDEB_DEFER_NONE;
3998                                 } else
3999                                         l_defer_t = SDEB_DEFER_NONE;
4000                                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4001                                 stop_qc_helper(sd_dp, l_defer_t);
4002                                 clear_bit(k, sqp->in_use_bm);
4003                                 spin_lock_irqsave(&sqp->qc_lock, iflags);
4004                         }
4005                 }
4006                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4007         }
4008 }
4009
4010 /* Free queued command memory on heap */
4011 static void free_all_queued(void)
4012 {
4013         int j, k;
4014         struct sdebug_queue *sqp;
4015         struct sdebug_queued_cmd *sqcp;
4016
4017         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4018                 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4019                         sqcp = &sqp->qc_arr[k];
4020                         kfree(sqcp->sd_dp);
4021                         sqcp->sd_dp = NULL;
4022                 }
4023         }
4024 }
4025
4026 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4027 {
4028         bool ok;
4029
4030         ++num_aborts;
4031         if (SCpnt) {
4032                 ok = stop_queued_cmnd(SCpnt);
4033                 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4034                         sdev_printk(KERN_INFO, SCpnt->device,
4035                                     "%s: command%s found\n", __func__,
4036                                     ok ? "" : " not");
4037         }
4038         return SUCCESS;
4039 }
4040
4041 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4042 {
4043         ++num_dev_resets;
4044         if (SCpnt && SCpnt->device) {
4045                 struct scsi_device *sdp = SCpnt->device;
4046                 struct sdebug_dev_info *devip =
4047                                 (struct sdebug_dev_info *)sdp->hostdata;
4048
4049                 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4050                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4051                 if (devip)
4052                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
4053         }
4054         return SUCCESS;
4055 }
4056
4057 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4058 {
4059         struct sdebug_host_info *sdbg_host;
4060         struct sdebug_dev_info *devip;
4061         struct scsi_device *sdp;
4062         struct Scsi_Host *hp;
4063         int k = 0;
4064
4065         ++num_target_resets;
4066         if (!SCpnt)
4067                 goto lie;
4068         sdp = SCpnt->device;
4069         if (!sdp)
4070                 goto lie;
4071         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4072                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4073         hp = sdp->host;
4074         if (!hp)
4075                 goto lie;
4076         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4077         if (sdbg_host) {
4078                 list_for_each_entry(devip,
4079                                     &sdbg_host->dev_info_list,
4080                                     dev_list)
4081                         if (devip->target == sdp->id) {
4082                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4083                                 ++k;
4084                         }
4085         }
4086         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4087                 sdev_printk(KERN_INFO, sdp,
4088                             "%s: %d device(s) found in target\n", __func__, k);
4089 lie:
4090         return SUCCESS;
4091 }
4092
4093 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4094 {
4095         struct sdebug_host_info *sdbg_host;
4096         struct sdebug_dev_info *devip;
4097         struct scsi_device *sdp;
4098         struct Scsi_Host *hp;
4099         int k = 0;
4100
4101         ++num_bus_resets;
4102         if (!(SCpnt && SCpnt->device))
4103                 goto lie;
4104         sdp = SCpnt->device;
4105         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4106                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4107         hp = sdp->host;
4108         if (hp) {
4109                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4110                 if (sdbg_host) {
4111                         list_for_each_entry(devip,
4112                                             &sdbg_host->dev_info_list,
4113                                             dev_list) {
4114                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4115                                 ++k;
4116                         }
4117                 }
4118         }
4119         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4120                 sdev_printk(KERN_INFO, sdp,
4121                             "%s: %d device(s) found in host\n", __func__, k);
4122 lie:
4123         return SUCCESS;
4124 }
4125
4126 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4127 {
4128         struct sdebug_host_info *sdbg_host;
4129         struct sdebug_dev_info *devip;
4130         int k = 0;
4131
4132         ++num_host_resets;
4133         if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4134                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4135         spin_lock(&sdebug_host_list_lock);
4136         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4137                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
4138                                     dev_list) {
4139                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4140                         ++k;
4141                 }
4142         }
4143         spin_unlock(&sdebug_host_list_lock);
4144         stop_all_queued();
4145         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4146                 sdev_printk(KERN_INFO, SCpnt->device,
4147                             "%s: %d device(s) found\n", __func__, k);
4148         return SUCCESS;
4149 }
4150
4151 static void __init sdebug_build_parts(unsigned char *ramp,
4152                                       unsigned long store_size)
4153 {
4154         struct partition *pp;
4155         int starts[SDEBUG_MAX_PARTS + 2];
4156         int sectors_per_part, num_sectors, k;
4157         int heads_by_sects, start_sec, end_sec;
4158
4159         /* assume partition table already zeroed */
4160         if ((sdebug_num_parts < 1) || (store_size < 1048576))
4161                 return;
4162         if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
4163                 sdebug_num_parts = SDEBUG_MAX_PARTS;
4164                 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
4165         }
4166         num_sectors = (int)sdebug_store_sectors;
4167         sectors_per_part = (num_sectors - sdebug_sectors_per)
4168                            / sdebug_num_parts;
4169         heads_by_sects = sdebug_heads * sdebug_sectors_per;
4170         starts[0] = sdebug_sectors_per;
4171         for (k = 1; k < sdebug_num_parts; ++k)
4172                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
4173                             * heads_by_sects;
4174         starts[sdebug_num_parts] = num_sectors;
4175         starts[sdebug_num_parts + 1] = 0;
4176
4177         ramp[510] = 0x55;       /* magic partition markings */
4178         ramp[511] = 0xAA;
4179         pp = (struct partition *)(ramp + 0x1be);
4180         for (k = 0; starts[k + 1]; ++k, ++pp) {
4181                 start_sec = starts[k];
4182                 end_sec = starts[k + 1] - 1;
4183                 pp->boot_ind = 0;
4184
4185                 pp->cyl = start_sec / heads_by_sects;
4186                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
4187                            / sdebug_sectors_per;
4188                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
4189
4190                 pp->end_cyl = end_sec / heads_by_sects;
4191                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
4192                                / sdebug_sectors_per;
4193                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
4194
4195                 pp->start_sect = cpu_to_le32(start_sec);
4196                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
4197                 pp->sys_ind = 0x83;     /* plain Linux partition */
4198         }
4199 }
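
/*
 * Worked example of the CHS arithmetic above (illustrative geometry; the
 * real values depend on the configured device size): with sdebug_heads = 64
 * and sdebug_sectors_per = 32, heads_by_sects = 2048, so a partition
 * starting at sector 4096 is encoded as cyl = 2, head = 0, sector = 1.
 */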
4200
4201 static void block_unblock_all_queues(bool block)
4202 {
4203         int j;
4204         struct sdebug_queue *sqp;
4205
4206         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4207                 atomic_set(&sqp->blocked, (int)block);
4208 }
4209
4210 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4211  * commands will be processed normally before triggers occur.
4212  */
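/*
 * For example: with every_nth = 100 and sdebug_cmnd_count = 250, the count
 * is rounded down to (250 / 100) * 100 = 200, so 99 further commands are
 * handled normally before the next trigger point.
 */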
4213 static void tweak_cmnd_count(void)
4214 {
4215         int count, modulo;
4216
4217         modulo = abs(sdebug_every_nth);
4218         if (modulo < 2)
4219                 return;
4220         block_unblock_all_queues(true);
4221         count = atomic_read(&sdebug_cmnd_count);
4222         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4223         block_unblock_all_queues(false);
4224 }
4225
4226 static void clear_queue_stats(void)
4227 {
4228         atomic_set(&sdebug_cmnd_count, 0);
4229         atomic_set(&sdebug_completions, 0);
4230         atomic_set(&sdebug_miss_cpus, 0);
4231         atomic_set(&sdebug_a_tsf, 0);
4232 }
4233
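/*
 * setup_inject() below decides, for a command about to be queued, whether
 * error injection applies on this pass: the inj_* flags are latched from the
 * SDEBUG_OPT_* bits in sdebug_opts only when sdebug_cmnd_count has reached a
 * multiple of abs(every_nth); otherwise (for positive every_nth) they are
 * cleared.
 */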
4234 static void setup_inject(struct sdebug_queue *sqp,
4235                          struct sdebug_queued_cmd *sqcp)
4236 {
4237         if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4238                 if (sdebug_every_nth > 0)
4239                         sqcp->inj_recovered = sqcp->inj_transport
4240                                 = sqcp->inj_dif
4241                                 = sqcp->inj_dix = sqcp->inj_short
4242                                 = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
4243                 return;
4244         }
4245         sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4246         sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4247         sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4248         sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4249         sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4250         sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4251         sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
4252 }
4253
4254 /* Complete the processing of the thread that queued a SCSI command to this
4255  * driver. It either completes the command by calling scsi_done() or
4256  * schedules a hr timer or work queue then returns 0. Returns
4257  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4258  */
4259 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4260                          int scsi_result,
4261                          int (*pfp)(struct scsi_cmnd *,
4262                                     struct sdebug_dev_info *),
4263                          int delta_jiff, int ndelay)
4264 {
4265         unsigned long iflags;
4266         int k, num_in_q, qdepth, inject;
4267         struct sdebug_queue *sqp;
4268         struct sdebug_queued_cmd *sqcp;
4269         struct scsi_device *sdp;
4270         struct sdebug_defer *sd_dp;
4271
4272         if (unlikely(devip == NULL)) {
4273                 if (scsi_result == 0)
4274                         scsi_result = DID_NO_CONNECT << 16;
4275                 goto respond_in_thread;
4276         }
4277         sdp = cmnd->device;
4278
4279         if (delta_jiff == 0)
4280                 goto respond_in_thread;
4281
4282         /* schedule the response at a later time if resources permit */
4283         sqp = get_queue(cmnd);
4284         spin_lock_irqsave(&sqp->qc_lock, iflags);
4285         if (unlikely(atomic_read(&sqp->blocked))) {
4286                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4287                 return SCSI_MLQUEUE_HOST_BUSY;
4288         }
4289         num_in_q = atomic_read(&devip->num_in_q);
4290         qdepth = cmnd->device->queue_depth;
4291         inject = 0;
4292         if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4293                 if (scsi_result) {
4294                         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4295                         goto respond_in_thread;
4296                 } else
4297                         scsi_result = device_qfull_result;
4298         } else if (unlikely(sdebug_every_nth &&
4299                             (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4300                             (scsi_result == 0))) {
4301                 if ((num_in_q == (qdepth - 1)) &&
4302                     (atomic_inc_return(&sdebug_a_tsf) >=
4303                      abs(sdebug_every_nth))) {
4304                         atomic_set(&sdebug_a_tsf, 0);
4305                         inject = 1;
4306                         scsi_result = device_qfull_result;
4307                 }
4308         }
4309
4310         k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4311         if (unlikely(k >= sdebug_max_queue)) {
4312                 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4313                 if (scsi_result)
4314                         goto respond_in_thread;
4315                 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4316                         scsi_result = device_qfull_result;
4317                 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4318                         sdev_printk(KERN_INFO, sdp,
4319                                     "%s: max_queue=%d exceeded, %s\n",
4320                                     __func__, sdebug_max_queue,
4321                                     (scsi_result ?  "status: TASK SET FULL" :
4322                                                     "report: host busy"));
4323                 if (scsi_result)
4324                         goto respond_in_thread;
4325                 else
4326                         return SCSI_MLQUEUE_HOST_BUSY;
4327         }
4328         __set_bit(k, sqp->in_use_bm);
4329         atomic_inc(&devip->num_in_q);
4330         sqcp = &sqp->qc_arr[k];
4331         sqcp->a_cmnd = cmnd;
4332         cmnd->host_scribble = (unsigned char *)sqcp;
4333         sd_dp = sqcp->sd_dp;
4334         spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4335         if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4336                 setup_inject(sqp, sqcp);
4337         if (sd_dp == NULL) {
4338                 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4339                 if (sd_dp == NULL)
4340                         return SCSI_MLQUEUE_HOST_BUSY;
4341         }
4342
4343         cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4344         if (cmnd->result & SDEG_RES_IMMED_MASK) {
4345                 /*
4346                  * This is the F_DELAY_OVERR case. No delay.
4347                  */
4348                 cmnd->result &= ~SDEG_RES_IMMED_MASK;
4349                 delta_jiff = ndelay = 0;
4350         }
4351         if (cmnd->result == 0 && scsi_result != 0)
4352                 cmnd->result = scsi_result;
4353
4354         if (unlikely(sdebug_verbose && cmnd->result))
4355                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4356                             __func__, cmnd->result);
4357
4358         if (delta_jiff > 0 || ndelay > 0) {
4359                 ktime_t kt;
4360
4361                 if (delta_jiff > 0) {
4362                         kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4363                 } else
4364                         kt = ndelay;
4365                 if (!sd_dp->init_hrt) {
4366                         sd_dp->init_hrt = true;
4367                         sqcp->sd_dp = sd_dp;
4368                         hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4369                                      HRTIMER_MODE_REL_PINNED);
4370                         sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4371                         sd_dp->sqa_idx = sqp - sdebug_q_arr;
4372                         sd_dp->qc_idx = k;
4373                 }
4374                 if (sdebug_statistics)
4375                         sd_dp->issuing_cpu = raw_smp_processor_id();
4376                 sd_dp->defer_t = SDEB_DEFER_HRT;
4377                 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4378         } else {        /* jdelay < 0 or zeroed above, use work queue */
4379                 if (!sd_dp->init_wq) {
4380                         sd_dp->init_wq = true;
4381                         sqcp->sd_dp = sd_dp;
4382                         sd_dp->sqa_idx = sqp - sdebug_q_arr;
4383                         sd_dp->qc_idx = k;
4384                         INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4385                 }
4386                 if (sdebug_statistics)
4387                         sd_dp->issuing_cpu = raw_smp_processor_id();
4388                 sd_dp->defer_t = SDEB_DEFER_WQ;
4389                 if (unlikely(sqcp->inj_cmd_abort))
4390                         sd_dp->aborted = true;
4391                 schedule_work(&sd_dp->ew.work);
4392                 if (unlikely(sqcp->inj_cmd_abort)) {
4393                         sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
4394                                     cmnd->request->tag);
4395                         blk_abort_request(cmnd->request);
4396                 }
4397         }
4398         if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4399                      (scsi_result == device_qfull_result)))
4400                 sdev_printk(KERN_INFO, sdp,
4401                             "%s: num_in_q=%d +1, %s%s\n", __func__,
4402                             num_in_q, (inject ? "<inject> " : ""),
4403                             "status: TASK SET FULL");
4404         return 0;
4405
4406 respond_in_thread:      /* call back to mid-layer using invocation thread */
4407         cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4408         cmnd->result &= ~SDEG_RES_IMMED_MASK;
4409         if (cmnd->result == 0 && scsi_result != 0)
4410                 cmnd->result = scsi_result;
4411         cmnd->scsi_done(cmnd);
4412         return 0;
4413 }
4414
4415 /* Note: The following macros create attribute files in the
4416    /sys/module/scsi_debug/parameters directory. Unfortunately this
4417    driver is not notified when one of those files is written, so it
4418    cannot trigger the auxiliary actions that a write to the corresponding
4419    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory does.
4420  */
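/*
 * For example (illustrative, using every_nth which appears in both
 * directories):
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 * goes through every_nth_store() below and therefore also runs
 * tweak_cmnd_count(), whereas
 *   echo 100 > /sys/module/scsi_debug/parameters/every_nth
 * only updates the value.
 */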
4421 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4422 module_param_named(ato, sdebug_ato, int, S_IRUGO);
4423 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4424 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4425 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4426 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4427 module_param_named(dif, sdebug_dif, int, S_IRUGO);
4428 module_param_named(dix, sdebug_dix, int, S_IRUGO);
4429 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4430 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4431 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4432 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4433 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4434 module_param_string(inq_vendor, sdebug_inq_vendor_id,
4435                     sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4436 module_param_string(inq_product, sdebug_inq_product_id,
4437                     sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4438 module_param_string(inq_rev, sdebug_inq_product_rev,
4439                     sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4440 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4441 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4442 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4443 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4444 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4445 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4446 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4447 module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
4448 module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
4449 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4450 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4451 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4452 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4453 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4454 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4455 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4456 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4457 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
4458 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4459 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4460 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4461 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4462 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4463 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4464 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4465 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4466 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4467 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4468 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4469 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4470 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4471 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4472                    S_IRUGO | S_IWUSR);
4473 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
4474 module_param_named(write_same_length, sdebug_write_same_length, int,
4475                    S_IRUGO | S_IWUSR);
4476
4477 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4478 MODULE_DESCRIPTION("SCSI debug adapter driver");
4479 MODULE_LICENSE("GPL");
4480 MODULE_VERSION(SDEBUG_VERSION);
4481
4482 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4483 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4484 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
4485 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4486 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4487 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4488 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4489 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4490 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4491 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4492 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4493 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4494 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4495 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
4496 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
4497 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
4498                  SDEBUG_VERSION "\")");
4499 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4500 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4501 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4502 MODULE_PARM_DESC(lbprz,
4503         "unmapped LBs read back as 0 when 1 (def), as 0xff when 2");
4504 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4505 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4506 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4507 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
4508 MODULE_PARM_DESC(medium_error_count, "number of sectors from medium_error_start that return MEDIUM error");
4509 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4510 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4511 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4512 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4513 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4514 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4515 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4516 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4517 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
4518 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4519 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4520 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
4521 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4522 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4523 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4524 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4525 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4526 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4527 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4528 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4529 MODULE_PARM_DESC(uuid_ctl,
4530                  "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4531 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4532 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4533 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
4534 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4535
4536 #define SDEBUG_INFO_LEN 256
4537 static char sdebug_info[SDEBUG_INFO_LEN];
4538
4539 static const char *scsi_debug_info(struct Scsi_Host *shp)
4540 {
4541         int k;
4542
4543         k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4544                       my_name, SDEBUG_VERSION, sdebug_version_date);
4545         if (k >= (SDEBUG_INFO_LEN - 1))
4546                 return sdebug_info;
4547         scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4548                   "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4549                   sdebug_dev_size_mb, sdebug_opts, submit_queues,
4550                   "statistics", (int)sdebug_statistics);
4551         return sdebug_info;
4552 }
4553
4554 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4555 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4556                                  int length)
4557 {
4558         char arr[16];
4559         int opts;
4560         int minLen = length > 15 ? 15 : length;
4561
4562         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4563                 return -EACCES;
4564         memcpy(arr, buffer, minLen);
4565         arr[minLen] = '\0';
4566         if (1 != sscanf(arr, "%d", &opts))
4567                 return -EINVAL;
4568         sdebug_opts = opts;
4569         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4570         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4571         if (sdebug_every_nth != 0)
4572                 tweak_cmnd_count();
4573         return length;
4574 }
4575
4576 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4577  * same for each scsi_debug host (if more than one). Some of the counters
4578  * output are not atomic, so they may be inaccurate on a busy system. */
4579 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4580 {
4581         int f, j, l;
4582         struct sdebug_queue *sqp;
4583
4584         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4585                    SDEBUG_VERSION, sdebug_version_date);
4586         seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4587                    sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4588                    sdebug_opts, sdebug_every_nth);
4589         seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4590                    sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4591                    sdebug_sector_size, "bytes");
4592         seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4593                    sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4594                    num_aborts);
4595         seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4596                    num_dev_resets, num_target_resets, num_bus_resets,
4597                    num_host_resets);
4598         seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4599                    dix_reads, dix_writes, dif_errors);
4600         seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
4601                    sdebug_statistics);
4602         seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4603                    atomic_read(&sdebug_cmnd_count),
4604                    atomic_read(&sdebug_completions),
4605                    "miss_cpus", atomic_read(&sdebug_miss_cpus),
4606                    atomic_read(&sdebug_a_tsf));
4607
4608         seq_printf(m, "submit_queues=%d\n", submit_queues);
4609         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4610                 seq_printf(m, "  queue %d:\n", j);
4611                 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4612                 if (f != sdebug_max_queue) {
4613                         l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4614                         seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4615                                    "first,last bits", f, l);
4616                 }
4617         }
4618         return 0;
4619 }
4620
4621 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4622 {
4623         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4624 }
4625 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4626  * of delay is jiffies.
4627  */
4628 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4629                            size_t count)
4630 {
4631         int jdelay, res;
4632
4633         if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4634                 res = count;
4635                 if (sdebug_jdelay != jdelay) {
4636                         int j, k;
4637                         struct sdebug_queue *sqp;
4638
4639                         block_unblock_all_queues(true);
4640                         for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4641                              ++j, ++sqp) {
4642                                 k = find_first_bit(sqp->in_use_bm,
4643                                                    sdebug_max_queue);
4644                                 if (k != sdebug_max_queue) {
4645                                         res = -EBUSY;   /* queued commands */
4646                                         break;
4647                                 }
4648                         }
4649                         if (res > 0) {
4650                                 sdebug_jdelay = jdelay;
4651                                 sdebug_ndelay = 0;
4652                         }
4653                         block_unblock_all_queues(false);
4654                 }
4655                 return res;
4656         }
4657         return -EINVAL;
4658 }
4659 static DRIVER_ATTR_RW(delay);
4660
4661 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4662 {
4663         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4664 }
4665 /* Returns -EBUSY if ndelay is being changed and commands are queued.
4666  * If ndelay > 0 and accepted, sdebug_jdelay is set to JDELAY_OVERRIDDEN. */
4667 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4668                             size_t count)
4669 {
4670         int ndelay, res;
4671
4672         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4673             (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4674                 res = count;
4675                 if (sdebug_ndelay != ndelay) {
4676                         int j, k;
4677                         struct sdebug_queue *sqp;
4678
4679                         block_unblock_all_queues(true);
4680                         for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4681                              ++j, ++sqp) {
4682                                 k = find_first_bit(sqp->in_use_bm,
4683                                                    sdebug_max_queue);
4684                                 if (k != sdebug_max_queue) {
4685                                         res = -EBUSY;   /* queued commands */
4686                                         break;
4687                                 }
4688                         }
4689                         if (res > 0) {
4690                                 sdebug_ndelay = ndelay;
4691                                 sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4692                                                         : DEF_JDELAY;
4693                         }
4694                         block_unblock_all_queues(false);
4695                 }
4696                 return res;
4697         }
4698         return -EINVAL;
4699 }
4700 static DRIVER_ATTR_RW(ndelay);
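
/*
 * Illustrative interaction of the delay and ndelay attributes: writing 5000
 * to ndelay requests a 5 us response delay and sets sdebug_jdelay to
 * JDELAY_OVERRIDDEN; writing 0 to ndelay restores sdebug_jdelay to
 * DEF_JDELAY.
 */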
4701
4702 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4703 {
4704         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4705 }
4706
4707 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4708                           size_t count)
4709 {
4710         int opts;
4711         char work[20];
4712
4713         if (sscanf(buf, "%10s", work) == 1) {
4714                 if (strncasecmp(work, "0x", 2) == 0) {
4715                         if (kstrtoint(work + 2, 16, &opts) == 0)
4716                                 goto opts_done;
4717                 } else {
4718                         if (kstrtoint(work, 10, &opts) == 0)
4719                                 goto opts_done;
4720                 }
4721         }
4722         return -EINVAL;
4723 opts_done:
4724         sdebug_opts = opts;
4725         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4726         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4727         tweak_cmnd_count();
4728         return count;
4729 }
4730 static DRIVER_ATTR_RW(opts);
4731
4732 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4733 {
4734         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4735 }
4736 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4737                            size_t count)
4738 {
4739         int n;
4740
4741         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4742                 sdebug_ptype = n;
4743                 return count;
4744         }
4745         return -EINVAL;
4746 }
4747 static DRIVER_ATTR_RW(ptype);
4748
4749 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4750 {
4751         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4752 }
4753 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4754                             size_t count)
4755 {
4756         int n;
4757
4758         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4759                 sdebug_dsense = n;
4760                 return count;
4761         }
4762         return -EINVAL;
4763 }
4764 static DRIVER_ATTR_RW(dsense);
4765
4766 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4767 {
4768         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4769 }
4770 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4771                              size_t count)
4772 {
4773         int n;
4774
4775         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4776                 n = (n > 0);
4777                 sdebug_fake_rw = (sdebug_fake_rw > 0);
4778                 if (sdebug_fake_rw != n) {
4779                         if ((0 == n) && (NULL == fake_storep)) {
4780                                 unsigned long sz =
4781                                         (unsigned long)sdebug_dev_size_mb *
4782                                         1048576;
4783
4784                                 fake_storep = vzalloc(sz);
4785                                 if (NULL == fake_storep) {
4786                                         pr_err("out of memory, 9\n");
4787                                         return -ENOMEM;
4788                                 }
4789                         }
4790                         sdebug_fake_rw = n;
4791                 }
4792                 return count;
4793         }
4794         return -EINVAL;
4795 }
4796 static DRIVER_ATTR_RW(fake_rw);
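
/*
 * Note on fake_rw_store() above: clearing fake_rw requires backing RAM; if
 * no store was allocated at module load time, a dev_size_mb sized store is
 * vzalloc()ed on demand before the flag is cleared.
 */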
4797
4798 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4799 {
4800         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4801 }
4802 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4803                               size_t count)
4804 {
4805         int n;
4806
4807         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4808                 sdebug_no_lun_0 = n;
4809                 return count;
4810         }
4811         return -EINVAL;
4812 }
4813 static DRIVER_ATTR_RW(no_lun_0);
4814
4815 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4816 {
4817         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4818 }
4819 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4820                               size_t count)
4821 {
4822         int n;
4823
4824         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4825                 sdebug_num_tgts = n;
4826                 sdebug_max_tgts_luns();
4827                 return count;
4828         }
4829         return -EINVAL;
4830 }
4831 static DRIVER_ATTR_RW(num_tgts);
4832
4833 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4834 {
4835         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4836 }
4837 static DRIVER_ATTR_RO(dev_size_mb);
4838
4839 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4840 {
4841         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4842 }
4843 static DRIVER_ATTR_RO(num_parts);
4844
4845 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4846 {
4847         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4848 }
4849 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4850                                size_t count)
4851 {
4852         int nth;
4853
4854         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4855                 sdebug_every_nth = nth;
4856                 if (nth && !sdebug_statistics) {
4857                         pr_info("every_nth needs statistics=1, set it\n");
4858                         sdebug_statistics = true;
4859                 }
4860                 tweak_cmnd_count();
4861                 return count;
4862         }
4863         return -EINVAL;
4864 }
4865 static DRIVER_ATTR_RW(every_nth);
4866
4867 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4868 {
4869         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4870 }
4871 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4872                               size_t count)
4873 {
4874         int n;
4875         bool changed;
4876
4877         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4878                 if (n > 256) {
4879                         pr_warn("max_luns can be no more than 256\n");
4880                         return -EINVAL;
4881                 }
4882                 changed = (sdebug_max_luns != n);
4883                 sdebug_max_luns = n;
4884                 sdebug_max_tgts_luns();
4885                 if (changed && (sdebug_scsi_level >= 5)) {      /* >= SPC-3 */
4886                         struct sdebug_host_info *sdhp;
4887                         struct sdebug_dev_info *dp;
4888
4889                         spin_lock(&sdebug_host_list_lock);
4890                         list_for_each_entry(sdhp, &sdebug_host_list,
4891                                             host_list) {
4892                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4893                                                     dev_list) {
4894                                         set_bit(SDEBUG_UA_LUNS_CHANGED,
4895                                                 dp->uas_bm);
4896                                 }
4897                         }
4898                         spin_unlock(&sdebug_host_list_lock);
4899                 }
4900                 return count;
4901         }
4902         return -EINVAL;
4903 }
4904 static DRIVER_ATTR_RW(max_luns);
4905
4906 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4907 {
4908         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4909 }
4910 /* N.B. max_queue can be changed while there are queued commands. In-flight
4911  * commands beyond the new max_queue will be completed. */
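/* How retired_max_queue is used here: if the highest in-use slot index k is
 * at or beyond the new max_queue, retired_max_queue is set to k + 1 and
 * sdebug_q_cmd_complete() shrinks it back toward 0 as those slots drain. */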
4912 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4913                                size_t count)
4914 {
4915         int j, n, k, a;
4916         struct sdebug_queue *sqp;
4917
4918         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4919             (n <= SDEBUG_CANQUEUE)) {
4920                 block_unblock_all_queues(true);
4921                 k = 0;
4922                 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4923                      ++j, ++sqp) {
4924                         a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
4925                         if (a > k)
4926                                 k = a;
4927                 }
4928                 sdebug_max_queue = n;
4929                 if (k == SDEBUG_CANQUEUE)
4930                         atomic_set(&retired_max_queue, 0);
4931                 else if (k >= n)
4932                         atomic_set(&retired_max_queue, k + 1);
4933                 else
4934                         atomic_set(&retired_max_queue, 0);
4935                 block_unblock_all_queues(false);
4936                 return count;
4937         }
4938         return -EINVAL;
4939 }
4940 static DRIVER_ATTR_RW(max_queue);
4941
4942 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4943 {
4944         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4945 }
4946 static DRIVER_ATTR_RO(no_uld);
4947
4948 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4949 {
4950         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4951 }
4952 static DRIVER_ATTR_RO(scsi_level);
4953
4954 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4955 {
4956         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4957 }
4958 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4959                                 size_t count)
4960 {
4961         int n;
4962         bool changed;
4963
4964         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4965                 changed = (sdebug_virtual_gb != n);
4966                 sdebug_virtual_gb = n;
4967                 sdebug_capacity = get_sdebug_capacity();
4968                 if (changed) {
4969                         struct sdebug_host_info *sdhp;
4970                         struct sdebug_dev_info *dp;
4971
4972                         spin_lock(&sdebug_host_list_lock);
4973                         list_for_each_entry(sdhp, &sdebug_host_list,
4974                                             host_list) {
4975                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4976                                                     dev_list) {
4977                                         set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4978                                                 dp->uas_bm);
4979                                 }
4980                         }
4981                         spin_unlock(&sdebug_host_list_lock);
4982                 }
4983                 return count;
4984         }
4985         return -EINVAL;
4986 }
4987 static DRIVER_ATTR_RW(virtual_gb);
4988
4989 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4990 {
4991         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4992 }
4993
4994 static int sdebug_add_adapter(void);
4995 static void sdebug_remove_adapter(void);
4996
4997 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4998                               size_t count)
4999 {
5000         int delta_hosts;
5001
5002         if (sscanf(buf, "%d", &delta_hosts) != 1)
5003                 return -EINVAL;
5004         if (delta_hosts > 0) {
5005                 do {
5006                         sdebug_add_adapter();
5007                 } while (--delta_hosts);
5008         } else if (delta_hosts < 0) {
5009                 do {
5010                         sdebug_remove_adapter();
5011                 } while (++delta_hosts);
5012         }
5013         return count;
5014 }
5015 static DRIVER_ATTR_RW(add_host);
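
/*
 * Illustrative usage of add_host: a positive value adds that many more
 * simulated hosts (each with num_tgts targets and max_luns LUNs per target),
 * a negative value removes that many, e.g.
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */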
5016
5017 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5018 {
5019         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5020 }
5021 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5022                                     size_t count)
5023 {
5024         int n;
5025
5026         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5027                 sdebug_vpd_use_hostno = n;
5028                 return count;
5029         }
5030         return -EINVAL;
5031 }
5032 static DRIVER_ATTR_RW(vpd_use_hostno);
5033
5034 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5035 {
5036         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5037 }
5038 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5039                                 size_t count)
5040 {
5041         int n;
5042
5043         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5044                 if (n > 0)
5045                         sdebug_statistics = true;
5046                 else {
5047                         clear_queue_stats();
5048                         sdebug_statistics = false;
5049                 }
5050                 return count;
5051         }
5052         return -EINVAL;
5053 }
5054 static DRIVER_ATTR_RW(statistics);
5055
5056 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5057 {
5058         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5059 }
5060 static DRIVER_ATTR_RO(sector_size);
5061
5062 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5063 {
5064         return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5065 }
5066 static DRIVER_ATTR_RO(submit_queues);
5067
5068 static ssize_t dix_show(struct device_driver *ddp, char *buf)
5069 {
5070         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5071 }
5072 static DRIVER_ATTR_RO(dix);
5073
5074 static ssize_t dif_show(struct device_driver *ddp, char *buf)
5075 {
5076         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5077 }
5078 static DRIVER_ATTR_RO(dif);
5079
5080 static ssize_t guard_show(struct device_driver *ddp, char *buf)
5081 {
5082         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5083 }
5084 static DRIVER_ATTR_RO(guard);
5085
5086 static ssize_t ato_show(struct device_driver *ddp, char *buf)
5087 {
5088         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5089 }
5090 static DRIVER_ATTR_RO(ato);
5091
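/*
 * When logical block provisioning is enabled, show the provisioning map as
 * a bit-list; otherwise report the whole store as mapped.
 */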
5092 static ssize_t map_show(struct device_driver *ddp, char *buf)
5093 {
5094         ssize_t count;
5095
5096         if (!scsi_debug_lbp())
5097                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
5098                                  sdebug_store_sectors);
5099
5100         count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
5101                           (int)map_size, map_storep);
5102         buf[count++] = '\n';
5103         buf[count] = '\0';
5104
5105         return count;
5106 }
5107 static DRIVER_ATTR_RO(map);
5108
5109 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5110 {
5111         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5112 }
5113 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5114                                size_t count)
5115 {
5116         int n;
5117
5118         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5119                 sdebug_removable = (n > 0);
5120                 return count;
5121         }
5122         return -EINVAL;
5123 }
5124 static DRIVER_ATTR_RW(removable);
5125
5126 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5127 {
5128         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5129 }
5130 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5131 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5132                                size_t count)
5133 {
5134         int n;
5135
5136         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5137                 sdebug_host_lock = (n > 0);
5138                 return count;
5139         }
5140         return -EINVAL;
5141 }
5142 static DRIVER_ATTR_RW(host_lock);
5143
5144 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5145 {
5146         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5147 }
5148 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5149                             size_t count)
5150 {
5151         int n;
5152
5153         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5154                 sdebug_strict = (n > 0);
5155                 return count;
5156         }
5157         return -EINVAL;
5158 }
5159 static DRIVER_ATTR_RW(strict);
5160
5161 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5162 {
5163         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5164 }
5165 static DRIVER_ATTR_RO(uuid_ctl);
5166
5167 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5168 {
5169         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5170 }
5171 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5172                              size_t count)
5173 {
5174         int ret, n;
5175
5176         ret = kstrtoint(buf, 0, &n);
5177         if (ret)
5178                 return ret;
5179         sdebug_cdb_len = n;
5180         all_config_cdb_len();
5181         return count;
5182 }
5183 static DRIVER_ATTR_RW(cdb_len);
5184
5185
5186 /* Note: The following array creates attribute files in the
5187    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5188    files (over those found in the /sys/module/scsi_debug/parameters
5189    directory) is that auxiliary actions can be triggered when an attribute
5190    is changed. For example see: add_host_store() above.
5191  */
5192
5193 static struct attribute *sdebug_drv_attrs[] = {
5194         &driver_attr_delay.attr,
5195         &driver_attr_opts.attr,
5196         &driver_attr_ptype.attr,
5197         &driver_attr_dsense.attr,
5198         &driver_attr_fake_rw.attr,
5199         &driver_attr_no_lun_0.attr,
5200         &driver_attr_num_tgts.attr,
5201         &driver_attr_dev_size_mb.attr,
5202         &driver_attr_num_parts.attr,
5203         &driver_attr_every_nth.attr,
5204         &driver_attr_max_luns.attr,
5205         &driver_attr_max_queue.attr,
5206         &driver_attr_no_uld.attr,
5207         &driver_attr_scsi_level.attr,
5208         &driver_attr_virtual_gb.attr,
5209         &driver_attr_add_host.attr,
5210         &driver_attr_vpd_use_hostno.attr,
5211         &driver_attr_sector_size.attr,
5212         &driver_attr_statistics.attr,
5213         &driver_attr_submit_queues.attr,
5214         &driver_attr_dix.attr,
5215         &driver_attr_dif.attr,
5216         &driver_attr_guard.attr,
5217         &driver_attr_ato.attr,
5218         &driver_attr_map.attr,
5219         &driver_attr_removable.attr,
5220         &driver_attr_host_lock.attr,
5221         &driver_attr_ndelay.attr,
5222         &driver_attr_strict.attr,
5223         &driver_attr_uuid_ctl.attr,
5224         &driver_attr_cdb_len.attr,
5225         NULL,
5226 };
5227 ATTRIBUTE_GROUPS(sdebug_drv);
5228
5229 static struct device *pseudo_primary;
5230
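/*
 * Module load: validate parameters, size the simulated store (plus the
 * optional DIF and provisioning map areas), register the pseudo root
 * device, bus and driver, then add the initial adapter(s).
 */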
5231 static int __init scsi_debug_init(void)
5232 {
5233         unsigned long sz;
5234         int host_to_add;
5235         int k;
5236         int ret;
5237
5238         atomic_set(&retired_max_queue, 0);
5239
5240         if (sdebug_ndelay >= 1000 * 1000 * 1000) {
5241                 pr_warn("ndelay must be less than 1 second, ignored\n");
5242                 sdebug_ndelay = 0;
5243         } else if (sdebug_ndelay > 0)
5244                 sdebug_jdelay = JDELAY_OVERRIDDEN;
5245
5246         switch (sdebug_sector_size) {
5247         case  512:
5248         case 1024:
5249         case 2048:
5250         case 4096:
5251                 break;
5252         default:
5253                 pr_err("invalid sector_size %d\n", sdebug_sector_size);
5254                 return -EINVAL;
5255         }
5256
5257         switch (sdebug_dif) {
5258         case T10_PI_TYPE0_PROTECTION:
5259                 break;
5260         case T10_PI_TYPE1_PROTECTION:
5261         case T10_PI_TYPE2_PROTECTION:
5262         case T10_PI_TYPE3_PROTECTION:
5263                 have_dif_prot = true;
5264                 break;
5265
5266         default:
5267                 pr_err("dif must be 0, 1, 2 or 3\n");
5268                 return -EINVAL;
5269         }
5270
5271         if (sdebug_guard > 1) {
5272                 pr_err("guard must be 0 or 1\n");
5273                 return -EINVAL;
5274         }
5275
5276         if (sdebug_ato > 1) {
5277                 pr_err("ato must be 0 or 1\n");
5278                 return -EINVAL;
5279         }
5280
5281         if (sdebug_physblk_exp > 15) {
5282                 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
5283                 return -EINVAL;
5284         }
5285         if (sdebug_max_luns > 256) {
5286                 pr_warn("max_luns can be no more than 256, using default\n");
5287                 sdebug_max_luns = DEF_MAX_LUNS;
5288         }
5289
5290         if (sdebug_lowest_aligned > 0x3fff) {
5291                 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
5292                 return -EINVAL;
5293         }
5294
5295         if (submit_queues < 1) {
5296                 pr_err("submit_queues must be 1 or more\n");
5297                 return -EINVAL;
5298         }
5299         sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
5300                                GFP_KERNEL);
5301         if (sdebug_q_arr == NULL)
5302                 return -ENOMEM;
5303         for (k = 0; k < submit_queues; ++k)
5304                 spin_lock_init(&sdebug_q_arr[k].qc_lock);
5305
5306         if (sdebug_dev_size_mb < 1)
5307                 sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
5308         sz = (unsigned long)sdebug_dev_size_mb * 1048576;
5309         sdebug_store_sectors = sz / sdebug_sector_size;
5310         sdebug_capacity = get_sdebug_capacity();
5311
5312         /* play around with geometry, don't waste too much on track 0 */
5313         sdebug_heads = 8;
5314         sdebug_sectors_per = 32;
5315         if (sdebug_dev_size_mb >= 256)
5316                 sdebug_heads = 64;
5317         else if (sdebug_dev_size_mb >= 16)
5318                 sdebug_heads = 32;
5319         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5320                                (sdebug_sectors_per * sdebug_heads);
5321         if (sdebug_cylinders_per >= 1024) {
5322                 /* other LLDs do this; implies >= 1GB ram disk ... */
5323                 sdebug_heads = 255;
5324                 sdebug_sectors_per = 63;
5325                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5326                                (sdebug_sectors_per * sdebug_heads);
5327         }
5328
5329         if (sdebug_fake_rw == 0) {
5330                 fake_storep = vzalloc(sz);
5331                 if (NULL == fake_storep) {
5332                         pr_err("unable to allocate %lu bytes for fake store\n", sz);
5333                         ret = -ENOMEM;
5334                         goto free_q_arr;
5335                 }
5336                 if (sdebug_num_parts > 0)
5337                         sdebug_build_parts(fake_storep, sz);
5338         }
5339
5340         if (sdebug_dix) {
5341                 int dif_size;
5342
5343                 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
5344                 dif_storep = vmalloc(dif_size);
5345
5346                 pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
5347
5348                 if (dif_storep == NULL) {
5349                         pr_err("out of mem. (DIX)\n");
5350                         ret = -ENOMEM;
5351                         goto free_vm;
5352                 }
5353
5354                 memset(dif_storep, 0xff, dif_size);
5355         }
5356
5357         /* Logical Block Provisioning */
5358         if (scsi_debug_lbp()) {
5359                 sdebug_unmap_max_blocks =
5360                         clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5361
5362                 sdebug_unmap_max_desc =
5363                         clamp(sdebug_unmap_max_desc, 0U, 256U);
5364
5365                 sdebug_unmap_granularity =
5366                         clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5367
5368                 if (sdebug_unmap_alignment &&
5369                     sdebug_unmap_granularity <=
5370                     sdebug_unmap_alignment) {
5371                         pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5372                         ret = -EINVAL;
5373                         goto free_vm;
5374                 }
5375
5376                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5377                 map_storep = vmalloc(array_size(sizeof(long),
5378                                                 BITS_TO_LONGS(map_size)));
5379
5380                 pr_info("%lu provisioning blocks\n", map_size);
5381
5382                 if (map_storep == NULL) {
5383                         pr_err("out of mem. (MAP)\n");
5384                         ret = -ENOMEM;
5385                         goto free_vm;
5386                 }
5387
5388                 bitmap_zero(map_storep, map_size);
5389
5390                 /* Map first 1KB for partition table */
5391                 if (sdebug_num_parts)
5392                         map_region(0, 2);
5393         }
5394
5395         pseudo_primary = root_device_register("pseudo_0");
5396         if (IS_ERR(pseudo_primary)) {
5397                 pr_warn("root_device_register() error\n");
5398                 ret = PTR_ERR(pseudo_primary);
5399                 goto free_vm;
5400         }
5401         ret = bus_register(&pseudo_lld_bus);
5402         if (ret < 0) {
5403                 pr_warn("bus_register error: %d\n", ret);
5404                 goto dev_unreg;
5405         }
5406         ret = driver_register(&sdebug_driverfs_driver);
5407         if (ret < 0) {
5408                 pr_warn("driver_register error: %d\n", ret);
5409                 goto bus_unreg;
5410         }
5411
5412         host_to_add = sdebug_add_host;
5413         sdebug_add_host = 0;
5414
5415         for (k = 0; k < host_to_add; k++) {
5416                 if (sdebug_add_adapter()) {
5417                         pr_err("sdebug_add_adapter failed k=%d\n", k);
5418                         break;
5419                 }
5420         }
5421
5422         if (sdebug_verbose)
5423                 pr_info("built %d host(s)\n", sdebug_add_host);
5424
5425         return 0;
5426
5427 bus_unreg:
5428         bus_unregister(&pseudo_lld_bus);
5429 dev_unreg:
5430         root_device_unregister(pseudo_primary);
5431 free_vm:
5432         vfree(map_storep);
5433         vfree(dif_storep);
5434         vfree(fake_storep);
5435 free_q_arr:
5436         kfree(sdebug_q_arr);
5437         return ret;
5438 }
5439
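/*
 * Module unload: remove the remaining adapters and undo scsi_debug_init()
 * in reverse order.
 */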
5440 static void __exit scsi_debug_exit(void)
5441 {
5442         int k = sdebug_add_host;
5443
5444         stop_all_queued();
5445         for (; k; k--)
5446                 sdebug_remove_adapter();
5447         free_all_queued();
5448         driver_unregister(&sdebug_driverfs_driver);
5449         bus_unregister(&pseudo_lld_bus);
5450         root_device_unregister(pseudo_primary);
5451
5452         vfree(map_storep);
5453         vfree(dif_storep);
5454         vfree(fake_storep);
5455         kfree(sdebug_q_arr);
5456 }
5457
5458 device_initcall(scsi_debug_init);
5459 module_exit(scsi_debug_exit);
5460
5461 static void sdebug_release_adapter(struct device *dev)
5462 {
5463         struct sdebug_host_info *sdbg_host;
5464
5465         sdbg_host = to_sdebug_host(dev);
5466         kfree(sdbg_host);
5467 }
5468
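/*
 * Allocate one simulated adapter plus num_tgts * max_luns device entries,
 * then register it on the pseudo bus (which calls sdebug_driver_probe()).
 */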
5469 static int sdebug_add_adapter(void)
5470 {
5471         int k, devs_per_host;
5472         int error = 0;
5473         struct sdebug_host_info *sdbg_host;
5474         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5475
5476         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5477         if (sdbg_host == NULL) {
5478                 pr_err("out of memory at line %d\n", __LINE__);
5479                 return -ENOMEM;
5480         }
5481
5482         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5483
5484         devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5485         for (k = 0; k < devs_per_host; k++) {
5486                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5487                 if (!sdbg_devinfo) {
5488                         pr_err("out of memory at line %d\n", __LINE__);
5489                         error = -ENOMEM;
5490                         goto clean;
5491                 }
5492         }
5493
5494         spin_lock(&sdebug_host_list_lock);
5495         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5496         spin_unlock(&sdebug_host_list_lock);
5497
5498         sdbg_host->dev.bus = &pseudo_lld_bus;
5499         sdbg_host->dev.parent = pseudo_primary;
5500         sdbg_host->dev.release = &sdebug_release_adapter;
5501         dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5502
5503         error = device_register(&sdbg_host->dev);
5504
5505         if (error)
5506                 goto clean;
5507
5508         ++sdebug_add_host;
5509         return error;
5510
5511 clean:
5512         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5513                                  dev_list) {
5514                 list_del(&sdbg_devinfo->dev_list);
5515                 kfree(sdbg_devinfo);
5516         }
5517
5518         kfree(sdbg_host);
5519         return error;
5520 }
5521
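/* Unregister the most recently added simulated adapter, if any */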
5522 static void sdebug_remove_adapter(void)
5523 {
5524         struct sdebug_host_info *sdbg_host = NULL;
5525
5526         spin_lock(&sdebug_host_list_lock);
5527         if (!list_empty(&sdebug_host_list)) {
5528                 sdbg_host = list_entry(sdebug_host_list.prev,
5529                                        struct sdebug_host_info, host_list);
5530                 list_del(&sdbg_host->host_list);
5531         }
5532         spin_unlock(&sdebug_host_list_lock);
5533
5534         if (!sdbg_host)
5535                 return;
5536
5537         device_unregister(&sdbg_host->dev);
5538         --sdebug_add_host;
5539 }
5540
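/* Clamp the requested queue depth to [1, SDEBUG_CANQUEUE + 10] and apply it */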
5541 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5542 {
5543         int num_in_q = 0;
5544         struct sdebug_dev_info *devip;
5545
5546         block_unblock_all_queues(true);
5547         devip = (struct sdebug_dev_info *)sdev->hostdata;
5548         if (NULL == devip) {
5549                 block_unblock_all_queues(false);
5550                 return  -ENODEV;
5551         }
5552         num_in_q = atomic_read(&devip->num_in_q);
5553
5554         if (qdepth < 1)
5555                 qdepth = 1;
5556         /* allow qdepth to exceed max host qc_arr elements for testing */
5557         if (qdepth > SDEBUG_CANQUEUE + 10)
5558                 qdepth = SDEBUG_CANQUEUE + 10;
5559         scsi_change_queue_depth(sdev, qdepth);
5560
5561         if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5562                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5563                             __func__, qdepth, num_in_q);
5564         }
5565         block_unblock_all_queues(false);
5566         return sdev->queue_depth;
5567 }
5568
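/*
 * Called only when sdebug_every_nth is non-zero: pretend that every nth
 * command (or every nth medium access command) timed out by dropping it.
 */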
5569 static bool fake_timeout(struct scsi_cmnd *scp)
5570 {
5571         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5572                 if (sdebug_every_nth < -1)
5573                         sdebug_every_nth = -1;
5574                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5575                         return true; /* ignore command causing timeout */
5576                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5577                          scsi_medium_access_command(scp))
5578                         return true; /* time out reads and writes */
5579         }
5580         return false;
5581 }
5582
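/* Optionally report a busy host on every nth command (SDEBUG_OPT_HOST_BUSY) */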
5583 static bool fake_host_busy(struct scsi_cmnd *scp)
5584 {
5585         return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) && sdebug_every_nth &&
5586                 (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5587 }
5588
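/*
 * Main command dispatch: map the CDB opcode (and service action) to an
 * entry in opcode_info_arr, run the checks its flags request (strict CDB
 * mask, unit attentions, stopped state) and queue the response via
 * schedule_resp().
 */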
5589 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5590                                    struct scsi_cmnd *scp)
5591 {
5592         u8 sdeb_i;
5593         struct scsi_device *sdp = scp->device;
5594         const struct opcode_info_t *oip;
5595         const struct opcode_info_t *r_oip;
5596         struct sdebug_dev_info *devip;
5597         u8 *cmd = scp->cmnd;
5598         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5599         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
5600         int k, na;
5601         int errsts = 0;
5602         u32 flags;
5603         u16 sa;
5604         u8 opcode = cmd[0];
5605         bool has_wlun_rl;
5606
5607         scsi_set_resid(scp, 0);
5608         if (sdebug_statistics)
5609                 atomic_inc(&sdebug_cmnd_count);
5610         if (unlikely(sdebug_verbose &&
5611                      !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
5612                 char b[120];
5613                 int n, len, sb;
5614
5615                 len = scp->cmd_len;
5616                 sb = (int)sizeof(b);
5617                 if (len > 32)
5618                         strcpy(b, "too long, over 32 bytes");
5619                 else {
5620                         for (k = 0, n = 0; k < len && n < sb; ++k)
5621                                 n += scnprintf(b + n, sb - n, "%02x ",
5622                                                (u32)cmd[k]);
5623                 }
5624                 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
5625                             blk_mq_unique_tag(scp->request), b);
5626         }
5627         if (fake_host_busy(scp))
5628                 return SCSI_MLQUEUE_HOST_BUSY;
5629         has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5630         if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
5631                 goto err_out;
5632
5633         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
5634         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
5635         devip = (struct sdebug_dev_info *)sdp->hostdata;
5636         if (unlikely(!devip)) {
5637                 devip = find_build_dev_info(sdp);
5638                 if (NULL == devip)
5639                         goto err_out;
5640         }
5641         na = oip->num_attached;
5642         r_pfp = oip->pfp;
5643         if (na) {       /* multiple commands with this opcode */
5644                 r_oip = oip;
5645                 if (FF_SA & r_oip->flags) {
5646                         if (F_SA_LOW & oip->flags)
5647                                 sa = 0x1f & cmd[1];
5648                         else
5649                                 sa = get_unaligned_be16(cmd + 8);
5650                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5651                                 if (opcode == oip->opcode && sa == oip->sa)
5652                                         break;
5653                         }
5654                 } else {   /* since no service action only check opcode */
5655                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5656                                 if (opcode == oip->opcode)
5657                                         break;
5658                         }
5659                 }
5660                 if (k > na) {
5661                         if (F_SA_LOW & r_oip->flags)
5662                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5663                         else if (F_SA_HIGH & r_oip->flags)
5664                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5665                         else
5666                                 mk_sense_invalid_opcode(scp);
5667                         goto check_cond;
5668                 }
5669         }       /* else (when na==0) we assume the oip is a match */
5670         flags = oip->flags;
5671         if (unlikely(F_INV_OP & flags)) {
5672                 mk_sense_invalid_opcode(scp);
5673                 goto check_cond;
5674         }
5675         if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
5676                 if (sdebug_verbose)
5677                         sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
5678                                     my_name, opcode);
5679                 mk_sense_invalid_opcode(scp);
5680                 goto check_cond;
5681         }
5682         if (unlikely(sdebug_strict)) {  /* check cdb against mask */
5683                 u8 rem;
5684                 int j;
5685
5686                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5687                         rem = ~oip->len_mask[k] & cmd[k];
5688                         if (rem) {
5689                                 for (j = 7; j >= 0; --j, rem <<= 1) {
5690                                         if (0x80 & rem)
5691                                                 break;
5692                                 }
5693                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5694                                 goto check_cond;
5695                         }
5696                 }
5697         }
5698         if (unlikely(!(F_SKIP_UA & flags) &&
5699                      find_first_bit(devip->uas_bm,
5700                                     SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
5701                 errsts = make_ua(scp, devip);
5702                 if (errsts)
5703                         goto check_cond;
5704         }
5705         if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
5706                 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5707                 if (sdebug_verbose)
5708                         sdev_printk(KERN_INFO, sdp,
5709                                     "%s reports: Not ready: initializing command required\n",
5710                                     my_name);
5711                 errsts = check_condition_result;
5712                 goto fini;
5713         }
5714         if (sdebug_fake_rw && (F_FAKE_RW & flags))
5715                 goto fini;
5716         if (unlikely(sdebug_every_nth)) {
5717                 if (fake_timeout(scp))
5718                         return 0;       /* ignore command: make trouble */
5719         }
5720         if (likely(oip->pfp))
5721                 pfp = oip->pfp; /* calls a resp_* function */
5722         else
5723                 pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
5724
5725 fini:
5726         if (F_DELAY_OVERR & flags)
5727                 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
5728         else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
5729                                             sdebug_ndelay > 10000)) {
5730                 /*
5731                  * Skip long delays if ndelay <= 10 microseconds. Otherwise
5732                  * for Start Stop Unit (SSU) want at least 1 second delay and
5733                  * if sdebug_jdelay>1 want a long delay of that many seconds.
5734                  * For Synchronize Cache want 1/20 of SSU's delay.
5735                  */
5736                 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
5737                 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
5738
5739                 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
5740                 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
5741         } else
5742                 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
5743                                      sdebug_ndelay);
5744 check_cond:
5745         return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
5746 err_out:
5747         return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
5748 }
5749
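/* Host template shared by all simulated adapters */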
5750 static struct scsi_host_template sdebug_driver_template = {
5751         .show_info =            scsi_debug_show_info,
5752         .write_info =           scsi_debug_write_info,
5753         .proc_name =            sdebug_proc_name,
5754         .name =                 "SCSI DEBUG",
5755         .info =                 scsi_debug_info,
5756         .slave_alloc =          scsi_debug_slave_alloc,
5757         .slave_configure =      scsi_debug_slave_configure,
5758         .slave_destroy =        scsi_debug_slave_destroy,
5759         .ioctl =                scsi_debug_ioctl,
5760         .queuecommand =         scsi_debug_queuecommand,
5761         .change_queue_depth =   sdebug_change_qdepth,
5762         .eh_abort_handler =     scsi_debug_abort,
5763         .eh_device_reset_handler = scsi_debug_device_reset,
5764         .eh_target_reset_handler = scsi_debug_target_reset,
5765         .eh_bus_reset_handler = scsi_debug_bus_reset,
5766         .eh_host_reset_handler = scsi_debug_host_reset,
5767         .can_queue =            SDEBUG_CANQUEUE,
5768         .this_id =              7,
5769         .sg_tablesize =         SG_MAX_SEGMENTS,
5770         .cmd_per_lun =          DEF_CMD_PER_LUN,
5771         .max_sectors =          -1U,
5772         .max_segment_size =     -1U,
5773         .module =               THIS_MODULE,
5774         .track_queue_depth =    1,
5775 };
5776
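/*
 * Bus probe for a simulated adapter: allocate the Scsi_Host, set its
 * limits and T10 PI (DIF/DIX) capabilities, then add and scan it.
 */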
5777 static int sdebug_driver_probe(struct device *dev)
5778 {
5779         int error = 0;
5780         struct sdebug_host_info *sdbg_host;
5781         struct Scsi_Host *hpnt;
5782         int hprot;
5783
5784         sdbg_host = to_sdebug_host(dev);
5785
5786         sdebug_driver_template.can_queue = sdebug_max_queue;
5787         if (!sdebug_clustering)
5788                 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
5789
5790         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5791         if (NULL == hpnt) {
5792                 pr_err("scsi_host_alloc failed\n");
5793                 error = -ENODEV;
5794                 return error;
5795         }
5796         if (submit_queues > nr_cpu_ids) {
5797                 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5798                         my_name, submit_queues, nr_cpu_ids);
5799                 submit_queues = nr_cpu_ids;
5800         }
5801         /* Decide whether to tell scsi subsystem that we want mq */
5802         /* Following should give the same answer for each host */
5803         hpnt->nr_hw_queues = submit_queues;
5804
5805         sdbg_host->shost = hpnt;
5806         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5807         if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5808                 hpnt->max_id = sdebug_num_tgts + 1;
5809         else
5810                 hpnt->max_id = sdebug_num_tgts;
5811         /* = sdebug_max_luns; */
5812         hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5813
5814         hprot = 0;
5815
5816         switch (sdebug_dif) {
5817
5818         case T10_PI_TYPE1_PROTECTION:
5819                 hprot = SHOST_DIF_TYPE1_PROTECTION;
5820                 if (sdebug_dix)
5821                         hprot |= SHOST_DIX_TYPE1_PROTECTION;
5822                 break;
5823
5824         case T10_PI_TYPE2_PROTECTION:
5825                 hprot = SHOST_DIF_TYPE2_PROTECTION;
5826                 if (sdebug_dix)
5827                         hprot |= SHOST_DIX_TYPE2_PROTECTION;
5828                 break;
5829
5830         case T10_PI_TYPE3_PROTECTION:
5831                 hprot = SHOST_DIF_TYPE3_PROTECTION;
5832                 if (sdebug_dix)
5833                         hprot |= SHOST_DIX_TYPE3_PROTECTION;
5834                 break;
5835
5836         default:
5837                 if (sdebug_dix)
5838                         hprot |= SHOST_DIX_TYPE0_PROTECTION;
5839                 break;
5840         }
5841
5842         scsi_host_set_prot(hpnt, hprot);
5843
5844         if (have_dif_prot || sdebug_dix)
5845                 pr_info("host protection%s%s%s%s%s%s%s\n",
5846                         (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5847                         (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5848                         (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5849                         (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5850                         (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5851                         (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5852                         (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5853
5854         if (sdebug_guard == 1)
5855                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5856         else
5857                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5858
5859         sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5860         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5861         if (sdebug_every_nth)   /* need stats counters for every_nth */
5862                 sdebug_statistics = true;
5863         error = scsi_add_host(hpnt, &sdbg_host->dev);
5864         if (error) {
5865                 pr_err("scsi_add_host failed\n");
5866                 error = -ENODEV;
5867                 scsi_host_put(hpnt);
5868         } else
5869                 scsi_scan_host(hpnt);
5870
5871         return error;
5872 }
5873
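/* Bus remove: tear down the Scsi_Host and free the fake device entries */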
5874 static int sdebug_driver_remove(struct device *dev)
5875 {
5876         struct sdebug_host_info *sdbg_host;
5877         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5878
5879         sdbg_host = to_sdebug_host(dev);
5880
5881         if (!sdbg_host) {
5882                 pr_err("Unable to locate host info\n");
5883                 return -ENODEV;
5884         }
5885
5886         scsi_remove_host(sdbg_host->shost);
5887
5888         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5889                                  dev_list) {
5890                 list_del(&sdbg_devinfo->dev_list);
5891                 kfree(sdbg_devinfo);
5892         }
5893
5894         scsi_host_put(sdbg_host->shost);
5895         return 0;
5896 }
5897
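/* Every device on the pseudo bus is handled by this driver */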
5898 static int pseudo_lld_bus_match(struct device *dev,
5899                                 struct device_driver *dev_driver)
5900 {
5901         return 1;
5902 }
5903
5904 static struct bus_type pseudo_lld_bus = {
5905         .name = "pseudo",
5906         .match = pseudo_lld_bus_match,
5907         .probe = sdebug_driver_probe,
5908         .remove = sdebug_driver_remove,
5909         .drv_groups = sdebug_drv_groups,
5910 };