GNU Linux-libre 5.10.153-gnu1
[releases.git] / drivers / scsi / scsi_debug.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18 #include <linux/module.h>
19
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45
46 #include <net/checksum.h>
47
48 #include <asm/unaligned.h>
49
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58
59 #include "sd.h"
60 #include "scsi_logging.h"
61
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190"   /* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";
65
66 #define MY_NAME "scsi_debug"
67
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW     0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_TUR_MS_TO_READY 0
155 #define DEF_UUID_CTL 0
156 #define JDELAY_OVERRIDDEN -9999
157
158 /* Default parameters for ZBC drives */
159 #define DEF_ZBC_ZONE_SIZE_MB    128
160 #define DEF_ZBC_MAX_OPEN_ZONES  8
161 #define DEF_ZBC_NR_CONV_ZONES   1
162
163 #define SDEBUG_LUN_0_VAL 0
164
165 /* bit mask values for sdebug_opts */
166 #define SDEBUG_OPT_NOISE                1
167 #define SDEBUG_OPT_MEDIUM_ERR           2
168 #define SDEBUG_OPT_TIMEOUT              4
169 #define SDEBUG_OPT_RECOVERED_ERR        8
170 #define SDEBUG_OPT_TRANSPORT_ERR        16
171 #define SDEBUG_OPT_DIF_ERR              32
172 #define SDEBUG_OPT_DIX_ERR              64
173 #define SDEBUG_OPT_MAC_TIMEOUT          128
174 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
175 #define SDEBUG_OPT_Q_NOISE              0x200
176 #define SDEBUG_OPT_ALL_TSF              0x400
177 #define SDEBUG_OPT_RARE_TSF             0x800
178 #define SDEBUG_OPT_N_WCE                0x1000
179 #define SDEBUG_OPT_RESET_NOISE          0x2000
180 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
181 #define SDEBUG_OPT_HOST_BUSY            0x8000
182 #define SDEBUG_OPT_CMD_ABORT            0x10000
183 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
184                               SDEBUG_OPT_RESET_NOISE)
185 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
186                                   SDEBUG_OPT_TRANSPORT_ERR | \
187                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
188                                   SDEBUG_OPT_SHORT_TRANSFER | \
189                                   SDEBUG_OPT_HOST_BUSY | \
190                                   SDEBUG_OPT_CMD_ABORT)
191 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
192                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
193
194 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
195  * priority order. In the subset implemented here lower numbers have higher
196  * priority. The UA numbers should be a sequence starting from 0 with
197  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
198 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
199 #define SDEBUG_UA_BUS_RESET 1
200 #define SDEBUG_UA_MODE_CHANGED 2
201 #define SDEBUG_UA_CAPACITY_CHANGED 3
202 #define SDEBUG_UA_LUNS_CHANGED 4
203 #define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
204 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
205 #define SDEBUG_NUM_UAS 7
206
207 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
208  * sector on read commands: */
209 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
210 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
211
212 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
213  * (for response) per submit queue at one time. Can be reduced by max_queue
214  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
215  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
216  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
217  * but cannot exceed SDEBUG_CANQUEUE .
218  */
219 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
220 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
221 #define DEF_CMD_PER_LUN  255
222
223 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
224 #define F_D_IN                  1       /* Data-in command (e.g. READ) */
225 #define F_D_OUT                 2       /* Data-out command (e.g. WRITE) */
226 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
227 #define F_D_UNKN                8
228 #define F_RL_WLUN_OK            0x10    /* allowed with REPORT LUNS W-LUN */
229 #define F_SKIP_UA               0x20    /* bypass UAs (e.g. INQUIRY command) */
230 #define F_DELAY_OVERR           0x40    /* for commands like INQUIRY */
231 #define F_SA_LOW                0x80    /* SA is in cdb byte 1, bits 4 to 0 */
232 #define F_SA_HIGH               0x100   /* SA is in cdb bytes 8 and 9 */
233 #define F_INV_OP                0x200   /* invalid opcode (not supported) */
234 #define F_FAKE_RW               0x400   /* bypass resp_*() when fake_rw set */
235 #define F_M_ACCESS              0x800   /* media access, reacts to SSU state */
236 #define F_SSU_DELAY             0x1000  /* SSU command delay (long-ish) */
237 #define F_SYNC_DELAY            0x2000  /* SYNCHRONIZE CACHE delay */
238
239 /* Useful combinations of the above flags */
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
244
245 #define SDEBUG_MAX_PARTS 4
246
247 #define SDEBUG_MAX_CMD_LEN 32
248
249 #define SDEB_XA_NOT_IN_USE XA_MARK_1
250
251 /* Zone types (zbcr05 table 25) */
252 enum sdebug_z_type {
253         ZBC_ZONE_TYPE_CNV       = 0x1, /* conventional (not write-pointer managed) */
254         ZBC_ZONE_TYPE_SWR       = 0x2, /* sequential write required */
255         ZBC_ZONE_TYPE_SWP       = 0x3, /* sequential write preferred */
256 };
257
258 /* enumeration names taken from table 26, zbcr05 */
/* Zone condition values; note the values are spec-mandated, not sequential:
 * READ_ONLY (ZC6) is 0xd while FULL (ZC5) is 0xe. */
259 enum sdebug_z_cond {
260         ZBC_NOT_WRITE_POINTER   = 0x0,
261         ZC1_EMPTY               = 0x1,
262         ZC2_IMPLICIT_OPEN       = 0x2,
263         ZC3_EXPLICIT_OPEN       = 0x3,
264         ZC4_CLOSED              = 0x4,
265         ZC6_READ_ONLY           = 0xd,
266         ZC5_FULL                = 0xe,
267         ZC7_OFFLINE             = 0xf,
268 };
269
270 struct sdeb_zone_state {        /* ZBC: per zone state */
271         enum sdebug_z_type z_type;
272         enum sdebug_z_cond z_cond;
273         bool z_non_seq_resource;        /* non-sequential-write resource in use — TODO confirm semantics */
274         unsigned int z_size;            /* zone size (units not visible here; presumably sectors) */
275         sector_t z_start;               /* first sector of this zone — presumably */
276         sector_t z_wp;                  /* write pointer position */
277 };
278
/* Per simulated logical unit (device) state. */
279 struct sdebug_dev_info {
280         struct list_head dev_list;      /* entry on owning host's dev_info_list — presumably */
281         unsigned int channel;
282         unsigned int target;
283         u64 lun;
284         uuid_t lu_name;                 /* logical unit name (see uuid_ctl module parameter) — TODO confirm */
285         struct sdebug_host_info *sdbg_host;     /* back pointer to owning host */
286         unsigned long uas_bm[1];        /* pending Unit Attention bitmap, bit per SDEBUG_UA_* (SDEBUG_NUM_UAS bits) */
287         atomic_t num_in_q;              /* commands currently queued on this device — presumably */
288         atomic_t stopped;       /* 1: by SSU, 2: device start */
289         bool used;              /* slot holds an active device — TODO confirm */
290
291         /* For ZBC devices */
292         enum blk_zoned_model zmodel;
293         unsigned int zsize;             /* zone size */
294         unsigned int zsize_shift;       /* presumably ilog2(zsize) — verify at setup site */
295         unsigned int nr_zones;
296         unsigned int nr_conv_zones;     /* leading conventional zones (see DEF_ZBC_NR_CONV_ZONES) */
297         unsigned int nr_imp_open;       /* implicitly open zone count */
298         unsigned int nr_exp_open;       /* explicitly open zone count */
299         unsigned int nr_closed;
300         unsigned int max_open;          /* cap on open zones (see DEF_ZBC_MAX_OPEN_ZONES) */
301         ktime_t create_ts;      /* time since bootup that this device was created */
302         struct sdeb_zone_state *zstate; /* per-zone state array, presumably nr_zones entries */
303 };
304
/* Per simulated SCSI host adapter state. */
305 struct sdebug_host_info {
306         struct list_head host_list;     /* entry on a global host list — TODO confirm */
307         int si_idx;     /* sdeb_store_info (per host) xarray index */
308         struct Scsi_Host *shost;        /* associated mid-level host */
309         struct device dev;              /* embedded device; see to_sdebug_host() */
310         struct list_head dev_info_list; /* sdebug_dev_info objects on this host */
311 };
312
313 /* There is an xarray of pointers to this struct's objects, one per host */
314 struct sdeb_store_info {
315         rwlock_t macc_lck;      /* for atomic media access on this store */
316         u8 *storep;             /* user data storage (ram) */
317         struct t10_pi_tuple *dif_storep; /* protection info; presumably NULL when DIF disabled */
318         void *map_storep;       /* provisioning map */
319 };
320
/* Map the embedded struct device back to its containing sdebug_host_info */
321 #define to_sdebug_host(d)       \
322         container_of(d, struct sdebug_host_info, dev)
323
/* How a command response is deferred: not at all, via hrtimer, or workqueue */
324 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
325                       SDEB_DEFER_WQ = 2};
326
/* Deferred-response state for one queued command. */
327 struct sdebug_defer {
328         struct hrtimer hrt;     /* used when defer_t == SDEB_DEFER_HRT */
329         struct execute_work ew; /* used when defer_t == SDEB_DEFER_WQ */
330         int sqa_idx;    /* index of sdebug_queue array */
331         int qc_idx;     /* index of sdebug_queued_cmd array within sqa_idx */
332         int hc_idx;     /* hostwide tag index */
333         int issuing_cpu;        /* submitting cpu; compared at completion (see sdebug_miss_cpus) */
334         bool init_hrt;  /* hrt has been initialized — presumably */
335         bool init_wq;   /* ew has been initialized — presumably */
336         bool aborted;   /* true when blk_abort_request() already called */
337         enum sdeb_defer_type defer_t;
338 };
339
340 struct sdebug_queued_cmd {
341         /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
342          * instance indicates this slot is in use.
343          */
344         struct sdebug_defer *sd_dp;     /* deferred-response state for this slot */
345         struct scsi_cmnd *a_cmnd;       /* the mid-level command held in this slot */
346 };
347
/* Per submit-queue state: up to SDEBUG_CANQUEUE in-flight commands. */
348 struct sdebug_queue {
349         struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
350         unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS]; /* bit set <=> qc_arr slot in use */
351         spinlock_t qc_lock;     /* presumably protects qc_arr and in_use_bm — TODO confirm */
352         atomic_t blocked;       /* to temporarily stop more being queued */
353 };
354
355 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
356 static atomic_t sdebug_completions;  /* count of deferred completions */
357 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
358 static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
359 static atomic_t sdeb_inject_pending; /* error injection armed (see SDEBUG_OPT_ALL_INJECTING) — TODO confirm trigger */
360
/* Describes one supported SCSI command (or family of related cdbs). */
361 struct opcode_info_t {
362         u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff */
363                                 /* for terminating element */
364         u8 opcode;              /* if num_attached > 0, preferred */
365         u16 sa;                 /* service action */
366         u32 flags;              /* OR-ed set of F_* flags (defined above) */
367         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);       /* response function; may be NULL */
368         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
369         u8 len_mask[16];        /* len_mask[0]-->cdb_len, then mask for cdb */
370                                 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
371 };
372
373 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
374 enum sdeb_opcode_index {
375         SDEB_I_INVALID_OPCODE = 0,
376         SDEB_I_INQUIRY = 1,
377         SDEB_I_REPORT_LUNS = 2,
378         SDEB_I_REQUEST_SENSE = 3,
379         SDEB_I_TEST_UNIT_READY = 4,
380         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
381         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
382         SDEB_I_LOG_SENSE = 7,
383         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
384         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
385         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
386         SDEB_I_START_STOP = 11,
387         SDEB_I_SERV_ACT_IN_16 = 12,     /* add ...SERV_ACT_IN_12 if needed */
388         SDEB_I_SERV_ACT_OUT_16 = 13,    /* add ...SERV_ACT_OUT_12 if needed */
389         SDEB_I_MAINT_IN = 14,
390         SDEB_I_MAINT_OUT = 15,
391         SDEB_I_VERIFY = 16,             /* VERIFY(10), VERIFY(16) */
392         SDEB_I_VARIABLE_LEN = 17,       /* READ(32), WRITE(32), WR_SCAT(32) */
393         SDEB_I_RESERVE = 18,            /* 6, 10 */
394         SDEB_I_RELEASE = 19,            /* 6, 10 */
395         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
396         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
397         SDEB_I_ATA_PT = 22,             /* 12, 16 */
398         SDEB_I_SEND_DIAG = 23,
399         SDEB_I_UNMAP = 24,
400         SDEB_I_WRITE_BUFFER = 25,
401         SDEB_I_WRITE_SAME = 26,         /* 10, 16 */
402         SDEB_I_SYNC_CACHE = 27,         /* 10, 16 */
403         SDEB_I_COMP_WRITE = 28,
404         SDEB_I_PRE_FETCH = 29,          /* 10, 16 */
405         SDEB_I_ZONE_OUT = 30,           /* 0x94+SA; includes no data xfer */
406         SDEB_I_ZONE_IN = 31,            /* 0x95+SA; all have data-in */
407         SDEB_I_LAST_ELEM_P1 = 32,       /* keep this last (previous + 1); sizes opcode_info_arr */
408 };
409
410
/* Maps cdb[0] (the SCSI opcode byte) to a SDEB_I_* index; 0 is
 * SDEB_I_INVALID_OPCODE, so unlisted opcodes resolve to "not supported". */
411 static const unsigned char opcode_ind_arr[256] = {
412 /* 0x0; 0x0->0x1f: 6 byte cdbs */
413         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
414             0, 0, 0, 0,
415         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
416         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
417             SDEB_I_RELEASE,
418         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
419             SDEB_I_ALLOW_REMOVAL, 0,
420 /* 0x20; 0x20->0x3f: 10 byte cdbs */
421         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
422         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
423         0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
424         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
425 /* 0x40; 0x40->0x5f: 10 byte cdbs */
426         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
427         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
428         0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
429             SDEB_I_RELEASE,
430         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
431 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
432         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
433         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
434         0, SDEB_I_VARIABLE_LEN,
435 /* 0x80; 0x80->0x9f: 16 byte cdbs */
436         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
437         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
438         0, 0, 0, SDEB_I_VERIFY,
439         SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
440         SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
441         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
442 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
443         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
444              SDEB_I_MAINT_OUT, 0, 0, 0,
445         SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
446              0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
447         0, 0, 0, 0, 0, 0, 0, 0,
448         0, 0, 0, 0, 0, 0, 0, 0,
449 /* 0xc0; 0xc0->0xff: vendor specific */
450         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
451         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
452         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
453         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 };
455
456 /*
457  * The following "response" functions return the SCSI mid-level's 4 byte
458  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
459  * command completion, they can mask their return value with
460  * SDEG_RES_IMMED_MASK .
461  */
462 #define SDEG_RES_IMMED_MASK 0x40000000
463
464 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
465 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
466 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
467 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493
494 static int sdebug_do_add_host(bool mk_new_store);
495 static int sdebug_add_host_helper(int per_host_idx);
496 static void sdebug_do_remove_host(bool the_end);
497 static int sdebug_add_store(void);
498 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
499 static void sdebug_erase_all_stores(bool apart_from_first);
500
501 /*
502  * The following are overflow arrays for cdbs that "hit" the same index in
503  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
504  * should be placed in opcode_info_arr[], the others should be placed here.
505  */
506 static const struct opcode_info_t msense_iarr[] = {
507         {0, 0x1a, 0, F_D_IN, NULL, NULL,        /* MODE SENSE(6) */
508             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
509 };
510
511 static const struct opcode_info_t mselect_iarr[] = {
512         {0, 0x15, 0, F_D_OUT, NULL, NULL,       /* MODE SELECT(6) */
513             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
514 };
515
/* READ(10/6/12) overflow entries; READ(16) is the primary SDEB_I_READ entry */
516 static const struct opcode_info_t read_iarr[] = {
517         {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
518             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
519              0, 0, 0, 0} },
520         {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
521             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
522         {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
523             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
524              0xc7, 0, 0, 0, 0} },
525 };
526
/* WRITE(10/6/12) overflow entries; WRITE(16) is the primary SDEB_I_WRITE entry */
527 static const struct opcode_info_t write_iarr[] = {
528         {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
529             NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
530                    0, 0, 0, 0, 0, 0} },
531         {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
532             NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
533                    0, 0, 0} },
534         {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
535             NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
536                    0xbf, 0xc7, 0, 0, 0, 0} },
537 };
538
/* VERIFY(10) overflow entry; VERIFY(16) is the primary SDEB_I_VERIFY entry */
539 static const struct opcode_info_t verify_iarr[] = {
540         {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
541             NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
542                    0, 0, 0, 0, 0, 0} },
543 };
544
/* SERVICE ACTION IN(16) overflow; READ CAPACITY(16) is the primary entry */
545 static const struct opcode_info_t sa_in_16_iarr[] = {
546         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
547             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
548              0xff, 0xff, 0xff, 0, 0xc7} },      /* GET LBA STATUS(16) */
549 };
550
/* VARIABLE LENGTH (0x7f) overflow; READ(32) is the primary entry; the
 * service action is carried in cdb bytes 8-9 (F_SA_HIGH) */
551 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
552         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
553             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
554                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
555         {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
556             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
557                    0, 0xff, 0xff, 0x0, 0x0} },  /* WRITE SCATTERED(32) */
558 };
559
/* MAINTENANCE IN overflow; REPORT TARGET PORT GROUPS is the primary entry */
560 static const struct opcode_info_t maint_in_iarr[] = {   /* MAINT IN */
561         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
562             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
563              0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
564         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
565             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
566              0, 0} },   /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
567 };
568
/* WRITE SAME(16) overflow entry; WRITE SAME(10) is the primary entry */
569 static const struct opcode_info_t write_same_iarr[] = {
570         {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
571             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
572              0xff, 0xff, 0xff, 0x3f, 0xc7} },           /* WRITE SAME(16) */
573 };
574
/* RESERVE(6) overflow entry; NULL pfp: accepted with no response function */
575 static const struct opcode_info_t reserve_iarr[] = {
576         {0, 0x16, 0, F_D_OUT, NULL, NULL,               /* RESERVE(6) */
577             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
578 };
579
/* RELEASE(6) overflow entry; NULL pfp: accepted with no response function */
580 static const struct opcode_info_t release_iarr[] = {
581         {0, 0x17, 0, F_D_OUT, NULL, NULL,               /* RELEASE(6) */
582             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
583 };
584
/* SYNCHRONIZE CACHE(16) overflow entry; the (10) form is the primary entry */
585 static const struct opcode_info_t sync_cache_iarr[] = {
586         {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
587             {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
588              0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },     /* SYNC_CACHE (16) */
589 };
590
/* PRE-FETCH(16) overflow entry; the (10) form is the primary entry */
591 static const struct opcode_info_t pre_fetch_iarr[] = {
592         {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
593             {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
594              0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },     /* PRE-FETCH (16) */
595 };
596
/* ZONE OUT (0x94) overflow entries, distinguished by service action (cdb[1]) */
597 static const struct opcode_info_t zone_out_iarr[] = {   /* ZONE OUT(16) */
598         {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
599             {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
600              0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },      /* CLOSE ZONE */
601         {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
602             {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603              0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },      /* FINISH ZONE */
604         {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
605             {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606              0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
607 };
608
/* ZONE IN (0x95) overflow; NOTE(review): pfp is NULL here even though
 * resp_report_zones exists — presumably the primary SDEB_I_ZONE_IN entry
 * supplies the response function; verify against opcode_info_arr. */
609 static const struct opcode_info_t zone_in_iarr[] = {    /* ZONE IN(16) */
610         {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
611             {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612              0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
613 };
614
615
616 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
617  * plus the terminating elements for logic that scans this table such as
618  * REPORT SUPPORTED OPERATION CODES. */
619 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
620 /* 0 */
621         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,    /* unknown opcodes */
622             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
623         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
624             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
625         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
626             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
627              0, 0} },                                   /* REPORT LUNS */
628         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
629             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
630         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
631             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 /* 5 */
633         {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,      /* MODE SENSE(10) */
634             resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
635                 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
636         {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,    /* MODE SELECT(10) */
637             resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
638                 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
639         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,      /* LOG SENSE */
640             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
641              0, 0, 0} },
642         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
643             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
644              0, 0} },
645         {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
646             resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
647             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
648 /* 10 */
649         {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
650             resp_write_dt0, write_iarr,                 /* WRITE(16) */
651                 {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
652                  0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
653         {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
654             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
655         {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
656             resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
657                 {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
658                  0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
659         {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
660             NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
661             0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
662         {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
663             resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
664                 maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
665                                 0xff, 0, 0xc7, 0, 0, 0, 0} },
666 /* 15 */
667         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
668             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
669         {ARRAY_SIZE(verify_iarr), 0x8f, 0,
670             F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,   /* VERIFY(16) */
671             verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
672                           0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
673         {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
674             resp_read_dt0, vl_iarr,     /* VARIABLE LENGTH, READ(32) */
675             {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
676              0xff, 0xff} },
677         {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
678             NULL, reserve_iarr, /* RESERVE(10) <no response function> */
679             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
680              0} },
681         {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
682             NULL, release_iarr, /* RELEASE(10) <no response function> */
683             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
684              0} },
685 /* 20 */
686         {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
687             {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
688         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
689             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
690         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
691             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
692         {0, 0x1d, F_D_OUT, 0, NULL, NULL,       /* SEND DIAGNOSTIC */
693             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
694         {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
695             {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
696 /* 25 */
697         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
698             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
699              0, 0, 0, 0} },                     /* WRITE_BUFFER */
700         {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
701             resp_write_same_10, write_same_iarr,        /* WRITE SAME(10) */
702                 {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
703                  0, 0, 0, 0, 0} },
704         {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
705             resp_sync_cache, sync_cache_iarr,
706             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
707              0, 0, 0, 0} },                     /* SYNC_CACHE (10) */
708         {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
709             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
710              0, 0xff, 0x3f, 0xc7} },            /* COMPARE AND WRITE */
711         {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
712             resp_pre_fetch, pre_fetch_iarr,
713             {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
714              0, 0, 0, 0} },                     /* PRE-FETCH (10) */
715
716 /* 30 */
717         {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
718             resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
719                 {16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
720                  0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
721         {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
722             resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
723                 {16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
724                  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
725 /* sentinel */
726         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
727             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
728 };
729
/*
 * Driver-wide settings and state.  The DEF_* / OPT_* initializers are the
 * compile-time defaults defined earlier in this file; NOTE(review): most of
 * these look like they are backed by like-named module parameters — confirm
 * against the module_param table further down the file.
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;	/* non-zero: use descriptor sense format */
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* logical block provisioning knobs (see scsi_debug_lbp() below) */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;		/* extra sdev_printk() logging when set */
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
789
/* SAM LUN addressing methods (2-bit address-method field of a LUN) */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
/* integer shadow of sdebug_lun_am (handy for int-typed parameter plumbing) */
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
796
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* all simulated hosts, guarded by sdebug_host_list_lock */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* xarray of sdeb_store_info, one entry per backing RAM store */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* counters reported via sysfs/procfs statistics */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

/* NOTE(review): presumably guard the (up to two) backing stores — confirm
 * at the use sites */
static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;	/* defined later in this file */

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Canned scsi_cmnd result values returned by the response routines */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
860
861 /* Only do the extra work involved in logical block provisioning if one or
862  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
863  * real reads and writes (i.e. not skipping them for speed).
864  */
865 static inline bool scsi_debug_lbp(void)
866 {
867         return 0 == sdebug_fake_rw &&
868                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
869 }
870
/*
 * Map logical block address @lba to its backing address inside @sip's RAM
 * store.  The LBA is first reduced modulo sdebug_store_sectors so a store
 * smaller than the advertised capacity wraps around.  If @sip is NULL or
 * has no storage, warn (once) and fall back to the shared store at xarray
 * index 0.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);	/* lba %= store sectors */
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
883
/*
 * Return the address of the T10 protection-information tuple backing
 * @sector in @sip's dif_storep array.  @sector wraps modulo
 * sdebug_store_sectors, matching the data mapping in lba2fake_store().
 */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
891
892 static void sdebug_max_tgts_luns(void)
893 {
894         struct sdebug_host_info *sdbg_host;
895         struct Scsi_Host *hpnt;
896
897         spin_lock(&sdebug_host_list_lock);
898         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
899                 hpnt = sdbg_host->shost;
900                 if ((hpnt->this_id >= 0) &&
901                     (sdebug_num_tgts > hpnt->this_id))
902                         hpnt->max_id = sdebug_num_tgts + 1;
903                 else
904                         hpnt->max_id = sdebug_num_tgts;
905                 /* sdebug_max_luns; */
906                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
907         }
908         spin_unlock(&sdebug_host_list_lock);
909 }
910
/* Where the invalid field was found: data-out buffer or the CDB itself */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific "field pointer" bytes (SPC) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	/* location of the bad field selects the additional sense code */
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;			/* SKSV: field pointer is valid */
	if (c_d)
		sks[0] |= 0x40;		/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;		/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field pointer (byte offset) */
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;	/* descriptor type */
		sbuff[sl + 1] = 0x6;	/* additional length */
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
953
954 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
955 {
956         unsigned char *sbuff;
957
958         sbuff = scp->sense_buffer;
959         if (!sbuff) {
960                 sdev_printk(KERN_ERR, scp->device,
961                             "%s: sense_buffer is NULL\n", __func__);
962                 return;
963         }
964         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
965
966         scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
967
968         if (sdebug_verbose)
969                 sdev_printk(KERN_INFO, scp->device,
970                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
971                             my_name, key, asc, asq);
972 }
973
/* Report ILLEGAL REQUEST with "invalid command operation code" sense */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
978
979 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
980                             void __user *arg)
981 {
982         if (sdebug_verbose) {
983                 if (0x1261 == cmd)
984                         sdev_printk(KERN_INFO, dev,
985                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
986                 else if (0x5331 == cmd)
987                         sdev_printk(KERN_INFO, dev,
988                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
989                                     __func__);
990                 else
991                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
992                                     __func__, cmd);
993         }
994         return -EINVAL;
995         /* return -ENOTTY; // correct return but upsets fdisk */
996 }
997
998 static void config_cdb_len(struct scsi_device *sdev)
999 {
1000         switch (sdebug_cdb_len) {
1001         case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1002                 sdev->use_10_for_rw = false;
1003                 sdev->use_16_for_rw = false;
1004                 sdev->use_10_for_ms = false;
1005                 break;
1006         case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1007                 sdev->use_10_for_rw = true;
1008                 sdev->use_16_for_rw = false;
1009                 sdev->use_10_for_ms = false;
1010                 break;
1011         case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1012                 sdev->use_10_for_rw = true;
1013                 sdev->use_16_for_rw = false;
1014                 sdev->use_10_for_ms = true;
1015                 break;
1016         case 16:
1017                 sdev->use_10_for_rw = false;
1018                 sdev->use_16_for_rw = true;
1019                 sdev->use_10_for_ms = true;
1020                 break;
1021         case 32: /* No knobs to suggest this so same as 16 for now */
1022                 sdev->use_10_for_rw = false;
1023                 sdev->use_16_for_rw = true;
1024                 sdev->use_10_for_ms = true;
1025                 break;
1026         default:
1027                 pr_warn("unexpected cdb_len=%d, force to 10\n",
1028                         sdebug_cdb_len);
1029                 sdev->use_10_for_rw = true;
1030                 sdev->use_16_for_rw = false;
1031                 sdev->use_10_for_ms = false;
1032                 sdebug_cdb_len = 10;
1033                 break;
1034         }
1035 }
1036
/*
 * Walk every device on every simulated host (under
 * sdebug_host_list_lock) and re-apply config_cdb_len() to each.
 */
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
1052
/*
 * Clear the LUNS_CHANGED unit attention on every LUN that shares
 * @devip's host and target.  Used for SPC-4 semantics where the UA is
 * reported only once per target (see make_ua()).
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
1068
/*
 * If any unit attention (UA) condition is pending for @devip, build the
 * corresponding sense data in @scp, clear that UA bit, and return
 * check_condition_result; otherwise return 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	/* lowest set bit wins: report one pending UA per call */
	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, only for verbose logging */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1148
1149 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1150 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1151                                 int arr_len)
1152 {
1153         int act_len;
1154         struct scsi_data_buffer *sdb = &scp->sdb;
1155
1156         if (!sdb->length)
1157                 return 0;
1158         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1159                 return DID_ERROR << 16;
1160
1161         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1162                                       arr, arr_len);
1163         scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1164
1165         return 0;
1166 }
1167
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset beyond the buffer: nothing to copy */
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	/* resid only shrinks across calls: keep the minimum of the current
	 * resid and the bytes past the end of this write */
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
1194
1195 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1196  * 'arr' or -1 if error.
1197  */
1198 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1199                                int arr_len)
1200 {
1201         if (!scsi_bufflen(scp))
1202                 return 0;
1203         if (scp->sc_data_direction != DMA_TO_DEVICE)
1204                 return -1;
1205
1206         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1207 }
1208
1209
/* INQUIRY response strings: fixed width, space padded (8/16/4 bytes on the
 * wire).  NOTE(review): kept non-const, presumably so module parameters can
 * overwrite them — confirm against the parameter table.
 */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1217
/* Device identification VPD page (0x83). Returns number of bytes placed
 * in arr.  Emits, in order: a T10 vendor-ID designator built from the
 * INQUIRY vendor/product strings plus dev_id_str; when dev_id_num >= 0, a
 * logical unit designator (locally assigned UUID if sdebug_uuid_ctl, else
 * NAA-3) and a relative target port designator; then NAA-3 target port,
 * target port group and target device designators plus a SCSI name string
 * for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (header excluded) */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* designator length: "naa.<16 hex digits>" + pad */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* zero pad to the declared 24 bytes */
	num += 4;
	return num;
}
1305
/* Payload of the Software interface identification VPD page (0x84):
 * three 6-byte identifiers, starting at the page's 4th byte.
 */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page; returns bytes placed in arr */
static int inquiry_vpd_84(unsigned char *arr)
{
	size_t n = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, n);
	return n;
}
1318
/*
 * Emit one network-services descriptor for the Management network
 * addresses VPD page: association/service-type byte, two reserved bytes,
 * a length byte, then the URL null-terminated and zero-padded to a
 * multiple of four bytes.  Returns the descriptor size in bytes.
 */
static int inquiry_vpd_85_nsd(unsigned char *arr, int assoc_service,
			      const char *url)
{
	int olen = strlen(url);
	int plen = (olen + 1 + 3) & ~3;	/* room for NUL, rounded up to x4 */

	arr[0] = assoc_service;
	arr[1] = 0x0;		/* reserved */
	arr[2] = 0x0;
	arr[3] = plen;
	memcpy(arr + 4, url, olen);
	memset(arr + 4 + olen, 0, plen - olen);
	return 4 + plen;
}

/* Management network addresses VPD page; returns bytes placed in arr */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu, storage config */
	num += inquiry_vpd_85_nsd(arr + num, 0x1,
				  "https://www.kernel.org/config");
	/* lu, logging */
	num += inquiry_vpd_85_nsd(arr + num, 0x4,
				  "http://www.kernel.org/log");
	return num;
}
1353
/* SCSI ports VPD page (0x88). Returns number of bytes placed in arr.
 * Reports two relative target ports (1 = primary, 2 = secondary), each
 * carrying one NAA-3 target port identifier derived from naa3_comp_a.
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1395
1396
/*
 * Canned payload for the ATA Information VPD page (0x89); raw bytes
 * starting at the page's 4th byte.  Contains an ASCII SAT vendor /
 * product / revision header followed by what appears to be a captured
 * ATA IDENTIFY DEVICE data block -- NOTE(review): exact provenance of
 * the identify data is not visible here; treat as opaque fixture data.
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1440
1441 /* ATA Information VPD page */
1442 static int inquiry_vpd_89(unsigned char *arr)
1443 {
1444         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1445         return sizeof(vpd89_data);
1446 }
1447
1448
/*
 * Template for the Block Limits VPD page (0xb0); raw bytes starting at
 * the page's 4th byte.  Most fields are overwritten from module
 * parameters by inquiry_vpd_b0() before the page is returned.
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1455
1456 /* Block limits VPD page (SBC-3) */
1457 static int inquiry_vpd_b0(unsigned char *arr)
1458 {
1459         unsigned int gran;
1460
1461         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1462
1463         /* Optimal transfer length granularity */
1464         if (sdebug_opt_xferlen_exp != 0 &&
1465             sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1466                 gran = 1 << sdebug_opt_xferlen_exp;
1467         else
1468                 gran = 1 << sdebug_physblk_exp;
1469         put_unaligned_be16(gran, arr + 2);
1470
1471         /* Maximum Transfer Length */
1472         if (sdebug_store_sectors > 0x400)
1473                 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1474
1475         /* Optimal Transfer Length */
1476         put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1477
1478         if (sdebug_lbpu) {
1479                 /* Maximum Unmap LBA Count */
1480                 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1481
1482                 /* Maximum Unmap Block Descriptor Count */
1483                 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1484         }
1485
1486         /* Unmap Granularity Alignment */
1487         if (sdebug_unmap_alignment) {
1488                 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1489                 arr[28] |= 0x80; /* UGAVALID */
1490         }
1491
1492         /* Optimal Unmap Granularity */
1493         put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1494
1495         /* Maximum WRITE SAME Length */
1496         put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1497
1498         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1499
1500         return sizeof(vpdb0_data);
1501 }
1502
1503 /* Block device characteristics VPD page (SBC-3) */
1504 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1505 {
1506         memset(arr, 0, 0x3c);
1507         arr[0] = 0;
1508         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1509         arr[2] = 0;
1510         arr[3] = 5;     /* less than 1.8" */
1511         if (devip->zmodel == BLK_ZONED_HA)
1512                 arr[4] = 1 << 4;        /* zoned field = 01b */
1513
1514         return 0x3c;
1515 }
1516
1517 /* Logical block provisioning VPD page (SBC-4) */
1518 static int inquiry_vpd_b2(unsigned char *arr)
1519 {
1520         memset(arr, 0, 0x4);
1521         arr[0] = 0;                     /* threshold exponent */
1522         if (sdebug_lbpu)
1523                 arr[1] = 1 << 7;
1524         if (sdebug_lbpws)
1525                 arr[1] |= 1 << 6;
1526         if (sdebug_lbpws10)
1527                 arr[1] |= 1 << 5;
1528         if (sdebug_lbprz && scsi_debug_lbp())
1529                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1530         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1531         /* minimum_percentage=0; provisioning_type=0 (unknown) */
1532         /* threshold_percentage=0 */
1533         return 0x4;
1534 }
1535
1536 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1537 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1538 {
1539         memset(arr, 0, 0x3c);
1540         arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1541         /*
1542          * Set Optimal number of open sequential write preferred zones and
1543          * Optimal number of non-sequentially written sequential write
1544          * preferred zones fields to 'not reported' (0xffffffff). Leave other
1545          * fields set to zero, apart from Max. number of open swrz_s field.
1546          */
1547         put_unaligned_be32(0xffffffff, &arr[4]);
1548         put_unaligned_be32(0xffffffff, &arr[8]);
1549         if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1550                 put_unaligned_be32(devip->max_open, &arr[12]);
1551         else
1552                 put_unaligned_be32(0xffffffff, &arr[12]);
1553         return 0x3c;
1554 }
1555
1556 #define SDEBUG_LONG_INQ_SZ 96
1557 #define SDEBUG_MAX_INQ_ARR_SZ 584
1558
/*
 * Respond to the SCSI INQUIRY command.  When the EVPD bit is set,
 * builds the requested Vital Product Data page (dispatching to the
 * inquiry_vpd_*() helpers); otherwise builds standard INQUIRY data.
 * The response is copied into the command's data-in buffer, truncated
 * to the CDB allocation length.  Returns 0, check_condition_result on
 * an invalid CDB field, or DID_REQUEUE << 16 on allocation failure.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;	/* ask the mid-layer to retry */
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	/* byte 0: peripheral qualifier (PQ) + peripheral device type (PDT) */
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		/* synthesize unit/target ids from host/target/lun numbers */
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;   /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			arr[5] = 0x7;	/* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;  /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];	/*sanity */
			n = inquiry_vpd_89(&arr[4]);
			/* page 0x89 needs a 16-bit page length (> 255) */
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* page length lives in bytes 2..3; +4 for the header */
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1722
/* See resp_iec_m_pg() for how this data is manipulated */
/* Informational Exceptions Control mode page [0x1c] current values */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1726
/*
 * Respond to REQUEST SENSE.  Three cases, in priority order:
 *  - device (still) stopped: report NOT READY with an ASC/ASCQ that
 *    distinguishes "becoming ready" (stopped_state == 2) from
 *    "initializing command required";
 *  - IE control mode page has TEST=1 and MRIE=6: report a false
 *    "failure prediction threshold exceeded";
 *  - otherwise: nothing to report (all-zero sense).
 * The DESC bit (cmd[1] & 1) selects descriptor (0x72, 8 bytes) versus
 * fixed (0x70, 18 bytes) sense format.
 */
static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);
	u32 alloc_len = cmd[4];
	u32 len = 18;
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;	/* descriptor format, current errors */
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;	/* fixed format, current errors */
			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
			arr[7] = 0xa;			/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
1780
/*
 * Respond to START STOP UNIT.  Only the default power condition (0) is
 * accepted.  devip->stopped == 2 means the unit is still inside the
 * tur_ms_to_ready startup delay: stop requests "succeed" without
 * changing that state, while start requests are rejected until the
 * delay expires.  Returns 0, SDEG_RES_IMMED_MASK (respond without
 * delay), or check_condition_result.
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {	/* only POWER CONDITION 0 supported */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear -> stop */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
1823
1824 static sector_t get_sdebug_capacity(void)
1825 {
1826         static const unsigned int gibibyte = 1073741824;
1827
1828         if (sdebug_virtual_gb > 0)
1829                 return (sector_t)sdebug_virtual_gb *
1830                         (gibibyte / sdebug_sector_size);
1831         else
1832                 return sdebug_store_sectors;
1833 }
1834
1835 #define SDEBUG_READCAP_ARR_SZ 8
1836 static int resp_readcap(struct scsi_cmnd *scp,
1837                         struct sdebug_dev_info *devip)
1838 {
1839         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1840         unsigned int capac;
1841
1842         /* following just in case virtual_gb changed */
1843         sdebug_capacity = get_sdebug_capacity();
1844         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1845         if (sdebug_capacity < 0xffffffff) {
1846                 capac = (unsigned int)sdebug_capacity - 1;
1847                 put_unaligned_be32(capac, arr + 0);
1848         } else
1849                 put_unaligned_be32(0xffffffff, arr + 0);
1850         put_unaligned_be16(sdebug_sector_size, arr + 6);
1851         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1852 }
1853
1854 #define SDEBUG_READCAP16_ARR_SZ 32
1855 static int resp_readcap16(struct scsi_cmnd *scp,
1856                           struct sdebug_dev_info *devip)
1857 {
1858         unsigned char *cmd = scp->cmnd;
1859         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1860         u32 alloc_len;
1861
1862         alloc_len = get_unaligned_be32(cmd + 10);
1863         /* following just in case virtual_gb changed */
1864         sdebug_capacity = get_sdebug_capacity();
1865         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1866         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1867         put_unaligned_be32(sdebug_sector_size, arr + 8);
1868         arr[13] = sdebug_physblk_exp & 0xf;
1869         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1870
1871         if (scsi_debug_lbp()) {
1872                 arr[14] |= 0x80; /* LBPME */
1873                 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1874                  * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1875                  * in the wider field maps to 0 in this field.
1876                  */
1877                 if (sdebug_lbprz & 1)   /* precisely what the draft requires */
1878                         arr[14] |= 0x40;
1879         }
1880
1881         arr[15] = sdebug_lowest_aligned & 0xff;
1882
1883         if (have_dif_prot) {
1884                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1885                 arr[12] |= 1; /* PROT_EN */
1886         }
1887
1888         return fill_from_dev_buffer(scp, arr,
1889                             min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1890 }
1891
1892 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1893
/*
 * Respond to REPORT TARGET PORT GROUPS (MAINTENANCE IN, sa 0x0a).
 * Builds two single-port target port groups matching VPD page 0x88:
 * group A (port 1) with a state cycled/fixed per vpd_use_hostno, and
 * group B (port 2) reported as unavailable.  Returns 0 on success or
 * DID_REQUEUE << 16 on allocation failure.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes the 4-byte header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1970
1971 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1972                              struct sdebug_dev_info *devip)
1973 {
1974         bool rctd;
1975         u8 reporting_opts, req_opcode, sdeb_i, supp;
1976         u16 req_sa, u;
1977         u32 alloc_len, a_len;
1978         int k, offset, len, errsts, count, bump, na;
1979         const struct opcode_info_t *oip;
1980         const struct opcode_info_t *r_oip;
1981         u8 *arr;
1982         u8 *cmd = scp->cmnd;
1983
1984         rctd = !!(cmd[2] & 0x80);
1985         reporting_opts = cmd[2] & 0x7;
1986         req_opcode = cmd[3];
1987         req_sa = get_unaligned_be16(cmd + 4);
1988         alloc_len = get_unaligned_be32(cmd + 6);
1989         if (alloc_len < 4 || alloc_len > 0xffff) {
1990                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1991                 return check_condition_result;
1992         }
1993         if (alloc_len > 8192)
1994                 a_len = 8192;
1995         else
1996                 a_len = alloc_len;
1997         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1998         if (NULL == arr) {
1999                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2000                                 INSUFF_RES_ASCQ);
2001                 return check_condition_result;
2002         }
2003         switch (reporting_opts) {
2004         case 0: /* all commands */
2005                 /* count number of commands */
2006                 for (count = 0, oip = opcode_info_arr;
2007                      oip->num_attached != 0xff; ++oip) {
2008                         if (F_INV_OP & oip->flags)
2009                                 continue;
2010                         count += (oip->num_attached + 1);
2011                 }
2012                 bump = rctd ? 20 : 8;
2013                 put_unaligned_be32(count * bump, arr);
2014                 for (offset = 4, oip = opcode_info_arr;
2015                      oip->num_attached != 0xff && offset < a_len; ++oip) {
2016                         if (F_INV_OP & oip->flags)
2017                                 continue;
2018                         na = oip->num_attached;
2019                         arr[offset] = oip->opcode;
2020                         put_unaligned_be16(oip->sa, arr + offset + 2);
2021                         if (rctd)
2022                                 arr[offset + 5] |= 0x2;
2023                         if (FF_SA & oip->flags)
2024                                 arr[offset + 5] |= 0x1;
2025                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2026                         if (rctd)
2027                                 put_unaligned_be16(0xa, arr + offset + 8);
2028                         r_oip = oip;
2029                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2030                                 if (F_INV_OP & oip->flags)
2031                                         continue;
2032                                 offset += bump;
2033                                 arr[offset] = oip->opcode;
2034                                 put_unaligned_be16(oip->sa, arr + offset + 2);
2035                                 if (rctd)
2036                                         arr[offset + 5] |= 0x2;
2037                                 if (FF_SA & oip->flags)
2038                                         arr[offset + 5] |= 0x1;
2039                                 put_unaligned_be16(oip->len_mask[0],
2040                                                    arr + offset + 6);
2041                                 if (rctd)
2042                                         put_unaligned_be16(0xa,
2043                                                            arr + offset + 8);
2044                         }
2045                         oip = r_oip;
2046                         offset += bump;
2047                 }
2048                 break;
2049         case 1: /* one command: opcode only */
2050         case 2: /* one command: opcode plus service action */
2051         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2052                 sdeb_i = opcode_ind_arr[req_opcode];
2053                 oip = &opcode_info_arr[sdeb_i];
2054                 if (F_INV_OP & oip->flags) {
2055                         supp = 1;
2056                         offset = 4;
2057                 } else {
2058                         if (1 == reporting_opts) {
2059                                 if (FF_SA & oip->flags) {
2060                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2061                                                              2, 2);
2062                                         kfree(arr);
2063                                         return check_condition_result;
2064                                 }
2065                                 req_sa = 0;
2066                         } else if (2 == reporting_opts &&
2067                                    0 == (FF_SA & oip->flags)) {
2068                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2069                                 kfree(arr);     /* point at requested sa */
2070                                 return check_condition_result;
2071                         }
2072                         if (0 == (FF_SA & oip->flags) &&
2073                             req_opcode == oip->opcode)
2074                                 supp = 3;
2075                         else if (0 == (FF_SA & oip->flags)) {
2076                                 na = oip->num_attached;
2077                                 for (k = 0, oip = oip->arrp; k < na;
2078                                      ++k, ++oip) {
2079                                         if (req_opcode == oip->opcode)
2080                                                 break;
2081                                 }
2082                                 supp = (k >= na) ? 1 : 3;
2083                         } else if (req_sa != oip->sa) {
2084                                 na = oip->num_attached;
2085                                 for (k = 0, oip = oip->arrp; k < na;
2086                                      ++k, ++oip) {
2087                                         if (req_sa == oip->sa)
2088                                                 break;
2089                                 }
2090                                 supp = (k >= na) ? 1 : 3;
2091                         } else
2092                                 supp = 3;
2093                         if (3 == supp) {
2094                                 u = oip->len_mask[0];
2095                                 put_unaligned_be16(u, arr + 2);
2096                                 arr[4] = oip->opcode;
2097                                 for (k = 1; k < u; ++k)
2098                                         arr[4 + k] = (k < 16) ?
2099                                                  oip->len_mask[k] : 0xff;
2100                                 offset = 4 + u;
2101                         } else
2102                                 offset = 4;
2103                 }
2104                 arr[1] = (rctd ? 0x80 : 0) | supp;
2105                 if (rctd) {
2106                         put_unaligned_be16(0xa, arr + offset);
2107                         offset += 12;
2108                 }
2109                 break;
2110         default:
2111                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2112                 kfree(arr);
2113                 return check_condition_result;
2114         }
2115         offset = (offset < a_len) ? offset : a_len;
2116         len = (offset < alloc_len) ? offset : alloc_len;
2117         errsts = fill_from_dev_buffer(scp, arr, len);
2118         kfree(arr);
2119         return errsts;
2120 }
2121
2122 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2123                           struct sdebug_dev_info *devip)
2124 {
2125         bool repd;
2126         u32 alloc_len, len;
2127         u8 arr[16];
2128         u8 *cmd = scp->cmnd;
2129
2130         memset(arr, 0, sizeof(arr));
2131         repd = !!(cmd[2] & 0x80);
2132         alloc_len = get_unaligned_be32(cmd + 6);
2133         if (alloc_len < 4) {
2134                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2135                 return check_condition_result;
2136         }
2137         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
2138         arr[1] = 0x1;           /* ITNRS */
2139         if (repd) {
2140                 arr[3] = 0xc;
2141                 len = 16;
2142         } else
2143                 len = 4;
2144
2145         len = (len < alloc_len) ? len : alloc_len;
2146         return fill_from_dev_buffer(scp, arr, len);
2147 }
2148
2149 /* <<Following mode page info copied from ST318451LW>> */
2150
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page [0x1] for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};
	const int n = sizeof(err_recov_pg);

	memcpy(p, err_recov_pg, n);
	if (pcontrol == 1)	/* changeable values: none */
		memset(p + 2, 0, n - 2);
	return n;
}
2161
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page [0x2] for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	const int n = sizeof(disconnect_pg);

	memcpy(p, disconnect_pg, n);
	if (pcontrol == 1)	/* changeable values: none */
		memset(p + 2, 0, n - 2);
	return n;
}
2172
2173 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2174 {       /* Format device page for mode_sense */
2175         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2176                                      0, 0, 0, 0, 0, 0, 0, 0,
2177                                      0, 0, 0, 0, 0x40, 0, 0, 0};
2178
2179         memcpy(p, format_pg, sizeof(format_pg));
2180         put_unaligned_be16(sdebug_sectors_per, p + 10);
2181         put_unaligned_be16(sdebug_sector_size, p + 12);
2182         if (sdebug_removable)
2183                 p[20] |= 0x20; /* should agree with INQUIRY */
2184         if (1 == pcontrol)
2185                 memset(p + 2, 0, sizeof(format_pg) - 2);
2186         return sizeof(format_pg);
2187 }
2188
/*
 * Caching mode page [0x8] "current values". WCE (0x4 in byte 2) starts
 * set; resp_caching_pg() clears it when SDEBUG_OPT_N_WCE is requested,
 * and resp_mode_select() may overwrite bytes 2 onward via MODE SELECT.
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2192
2193 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2194 {       /* Caching page for mode_sense */
2195         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2196                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2197         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2198                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2199
2200         if (SDEBUG_OPT_N_WCE & sdebug_opts)
2201                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
2202         memcpy(p, caching_pg, sizeof(caching_pg));
2203         if (1 == pcontrol)
2204                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2205         else if (2 == pcontrol)
2206                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2207         return sizeof(caching_pg);
2208 }
2209
/*
 * Control mode page [0xa] "current values". resp_ctrl_m_pg() refreshes
 * the D_SENSE (0x4 in byte 2) and ATO (0x80 in byte 5) bits from module
 * parameters; resp_mode_select() may overwrite bytes 2 onward.
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2212
2213 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2214 {       /* Control mode page for mode_sense */
2215         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2216                                         0, 0, 0, 0};
2217         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2218                                      0, 0, 0x2, 0x4b};
2219
2220         if (sdebug_dsense)
2221                 ctrl_m_pg[2] |= 0x4;
2222         else
2223                 ctrl_m_pg[2] &= ~0x4;
2224
2225         if (sdebug_ato)
2226                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2227
2228         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2229         if (1 == pcontrol)
2230                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2231         else if (2 == pcontrol)
2232                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2233         return sizeof(ctrl_m_pg);
2234 }
2235
2236
2237 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2238 {       /* Informational Exceptions control mode page for mode_sense */
2239         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2240                                        0, 0, 0x0, 0x0};
2241         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2242                                       0, 0, 0x0, 0x0};
2243
2244         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2245         if (1 == pcontrol)
2246                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2247         else if (2 == pcontrol)
2248                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2249         return sizeof(iec_m_pg);
2250 }
2251
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page [0x19], short format, for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
	const int n = sizeof(sas_sf_m_pg);

	memcpy(p, sas_sf_m_pg, n);
	if (pcontrol == 1)	/* changeable values: none */
		memset(p + 2, 0, n - 2);
	return n;
}
2262
2263
2264 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2265                               int target_dev_id)
2266 {       /* SAS phy control and discover mode page for mode_sense */
2267         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2268                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2269                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2270                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2271                     0x2, 0, 0, 0, 0, 0, 0, 0,
2272                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2273                     0, 0, 0, 0, 0, 0, 0, 0,
2274                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2275                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2276                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2277                     0x3, 0, 0, 0, 0, 0, 0, 0,
2278                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2279                     0, 0, 0, 0, 0, 0, 0, 0,
2280                 };
2281         int port_a, port_b;
2282
2283         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2284         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2285         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2286         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2287         port_a = target_dev_id + 1;
2288         port_b = port_a + 1;
2289         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2290         put_unaligned_be32(port_a, p + 20);
2291         put_unaligned_be32(port_b, p + 48 + 20);
2292         if (1 == pcontrol)
2293                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2294         return sizeof(sas_pcd_m_pg);
2295 }
2296
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};
	const int n = sizeof(sas_sha_m_pg);

	memcpy(p, sas_sha_m_pg, n);
	if (pcontrol == 1)	/* changeable: none beyond the header */
		memset(p + 4, 0, n - 4);
	return n;
}
2308
2309 #define SDEBUG_MAX_MSENSE_SZ 256
2310
/*
 * Logic for MODE SENSE(6) and MODE SENSE(10). Builds the mode parameter
 * header, an optional block descriptor (short, or long-LBA for MODE
 * SENSE(10) with LLBAA) for disk/ZBC devices, then the requested mode
 * page(s). pcontrol==3 (saved values) is rejected as unsupported.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	/* LLBAA (long LBA accepted) only exists in the 10-byte CDB */
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes for (6), 8 bytes for (10) */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	/* append the block descriptor, then advance ap past it */
	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* subpages are only implemented for page 0x19 (and 0x3f below) */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* fix up the mode data length field in the header */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}
2475
2476 #define SDEBUG_MAX_MSELECT_SZ 512
2477
/*
 * Logic for MODE SELECT(6) and (10). Accepts changes to the Caching
 * [0x8], Control [0xa] and IEC [0x1c] mode pages, copying the supplied
 * bytes into the corresponding global "current values" arrays and
 * raising a MODE PARAMETERS CHANGED unit attention on success.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;
	sp = cmd[1] & 0x1;
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	/* PF must be set; SP (save pages) is not supported */
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* off: start of the first mode page, past header + block descs */
	off = bd_len + (mselect6 ? 4 : 8);
	/* NOTE(review): mode data length is reserved in MODE SELECT data,
	 * hence md_len > 2 is rejected — confirm against SPC-4 */
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);
	/* PS (parameters saveable) must be 0 in data sent to the device */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SPF: subpage (long) page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* only accept a page whose length matches the stored page's */
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* mirror SWP and D_SENSE into module state */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:	/* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2559
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page [0xd] parameter data */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* param 0: temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* param 1: reference temp */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2569
2570 static int resp_ie_l_pg(unsigned char *arr)
2571 {
2572         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2573                 };
2574
2575         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2576         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2577                 arr[4] = THRESHOLD_EXCEEDED;
2578                 arr[5] = 0xff;
2579         }
2580         return sizeof(ie_l_pg);
2581 }
2582
2583 #define SDEBUG_MAX_LSENSE_SZ 512
2584
2585 static int resp_log_sense(struct scsi_cmnd *scp,
2586                           struct sdebug_dev_info *devip)
2587 {
2588         int ppc, sp, pcode, subpcode;
2589         u32 alloc_len, len, n;
2590         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2591         unsigned char *cmd = scp->cmnd;
2592
2593         memset(arr, 0, sizeof(arr));
2594         ppc = cmd[1] & 0x2;
2595         sp = cmd[1] & 0x1;
2596         if (ppc || sp) {
2597                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2598                 return check_condition_result;
2599         }
2600         pcode = cmd[2] & 0x3f;
2601         subpcode = cmd[3] & 0xff;
2602         alloc_len = get_unaligned_be16(cmd + 7);
2603         arr[0] = pcode;
2604         if (0 == subpcode) {
2605                 switch (pcode) {
2606                 case 0x0:       /* Supported log pages log page */
2607                         n = 4;
2608                         arr[n++] = 0x0;         /* this page */
2609                         arr[n++] = 0xd;         /* Temperature */
2610                         arr[n++] = 0x2f;        /* Informational exceptions */
2611                         arr[3] = n - 4;
2612                         break;
2613                 case 0xd:       /* Temperature log page */
2614                         arr[3] = resp_temp_l_pg(arr + 4);
2615                         break;
2616                 case 0x2f:      /* Informational exceptions log page */
2617                         arr[3] = resp_ie_l_pg(arr + 4);
2618                         break;
2619                 default:
2620                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2621                         return check_condition_result;
2622                 }
2623         } else if (0xff == subpcode) {
2624                 arr[0] |= 0x40;
2625                 arr[1] = subpcode;
2626                 switch (pcode) {
2627                 case 0x0:       /* Supported log pages and subpages log page */
2628                         n = 4;
2629                         arr[n++] = 0x0;
2630                         arr[n++] = 0x0;         /* 0,0 page */
2631                         arr[n++] = 0x0;
2632                         arr[n++] = 0xff;        /* this page */
2633                         arr[n++] = 0xd;
2634                         arr[n++] = 0x0;         /* Temperature */
2635                         arr[n++] = 0x2f;
2636                         arr[n++] = 0x0; /* Informational exceptions */
2637                         arr[3] = n - 4;
2638                         break;
2639                 case 0xd:       /* Temperature subpages */
2640                         n = 4;
2641                         arr[n++] = 0xd;
2642                         arr[n++] = 0x0;         /* Temperature */
2643                         arr[3] = n - 4;
2644                         break;
2645                 case 0x2f:      /* Informational exceptions subpages */
2646                         n = 4;
2647                         arr[n++] = 0x2f;
2648                         arr[n++] = 0x0;         /* Informational exceptions */
2649                         arr[3] = n - 4;
2650                         break;
2651                 default:
2652                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2653                         return check_condition_result;
2654                 }
2655         } else {
2656                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2657                 return check_condition_result;
2658         }
2659         len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2660         return fill_from_dev_buffer(scp, arr,
2661                     min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2662 }
2663
2664 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2665 {
2666         return devip->nr_zones != 0;
2667 }
2668
2669 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2670                                         unsigned long long lba)
2671 {
2672         return &devip->zstate[lba >> devip->zsize_shift];
2673 }
2674
2675 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2676 {
2677         return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2678 }
2679
2680 static void zbc_close_zone(struct sdebug_dev_info *devip,
2681                            struct sdeb_zone_state *zsp)
2682 {
2683         enum sdebug_z_cond zc;
2684
2685         if (zbc_zone_is_conv(zsp))
2686                 return;
2687
2688         zc = zsp->z_cond;
2689         if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2690                 return;
2691
2692         if (zc == ZC2_IMPLICIT_OPEN)
2693                 devip->nr_imp_open--;
2694         else
2695                 devip->nr_exp_open--;
2696
2697         if (zsp->z_wp == zsp->z_start) {
2698                 zsp->z_cond = ZC1_EMPTY;
2699         } else {
2700                 zsp->z_cond = ZC4_CLOSED;
2701                 devip->nr_closed++;
2702         }
2703 }
2704
2705 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2706 {
2707         struct sdeb_zone_state *zsp = &devip->zstate[0];
2708         unsigned int i;
2709
2710         for (i = 0; i < devip->nr_zones; i++, zsp++) {
2711                 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2712                         zbc_close_zone(devip, zsp);
2713                         return;
2714                 }
2715         }
2716 }
2717
/*
 * Transition a zone to the explicitly or implicitly open condition,
 * honouring the device's max_open limit by implicitly closing an
 * implicitly-open zone when needed. No-op for conventional zones and
 * for zones already open in the requested manner.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	/* already open in the requested manner: nothing to do */
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	/* update the counters for the zone's previous and new condition */
	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
2748
2749 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2750                                      struct sdeb_zone_state *zsp)
2751 {
2752         switch (zsp->z_cond) {
2753         case ZC2_IMPLICIT_OPEN:
2754                 devip->nr_imp_open--;
2755                 break;
2756         case ZC3_EXPLICIT_OPEN:
2757                 devip->nr_exp_open--;
2758                 break;
2759         default:
2760                 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2761                           zsp->z_start, zsp->z_cond);
2762                 break;
2763         }
2764         zsp->z_cond = ZC5_FULL;
2765 }
2766
/*
 * Advance the write pointer of the zone containing @lba after a write of
 * @num blocks.  Sequential-write-required zones were already checked to be
 * written WP-aligned within a single zone, so a simple increment suffices.
 * Sequential-write-preferred zones may be written anywhere, so the update
 * may span several consecutive zones and flags out-of-order writes as
 * non-sequential resources.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	/* Conventional zones have no write pointer */
	if (zbc_zone_is_conv(zsp))
		return;

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	/* Sequential write preferred: walk every zone the write touches */
	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Write runs to (or past) the end of this zone */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* Write entirely below the WP: WP unchanged */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			/* Continue into the next (adjacent) zone */
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
2808
/*
 * Validate a read or write of @num blocks at @lba against the device's zone
 * model.  Returns 0 if the access is allowed, otherwise
 * check_condition_result with the sense data already set in @scp.  A valid
 * write to an empty or closed sequential zone implicitly opens it.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* Host-aware devices allow any read */
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp_end != zsp &&
		    zbc_zone_is_conv(zsp) &&
		    !zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ...but they may not spill over into a sequential zone */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		/*
		 * Only explicitly open zones count against the limit here:
		 * implicitly open zones can be closed by zbc_open_zone() to
		 * make room, explicitly open ones cannot.
		 */
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2880
2881 static inline int check_device_access_params
2882                         (struct scsi_cmnd *scp, unsigned long long lba,
2883                          unsigned int num, bool write)
2884 {
2885         struct scsi_device *sdp = scp->device;
2886         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2887
2888         if (lba + num > sdebug_capacity) {
2889                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2890                 return check_condition_result;
2891         }
2892         /* transfer length excessive (tie in to block limits VPD page) */
2893         if (num > sdebug_store_sectors) {
2894                 /* needs work to find which cdb byte 'num' comes from */
2895                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2896                 return check_condition_result;
2897         }
2898         if (write && unlikely(sdebug_wp)) {
2899                 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2900                 return check_condition_result;
2901         }
2902         if (sdebug_dev_is_zoned(devip))
2903                 return check_zbc_access_params(scp, lba, num, write);
2904
2905         return 0;
2906 }
2907
2908 /*
2909  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2910  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2911  * that access any of the "stores" in struct sdeb_store_info should call this
2912  * function with bug_if_fake_rw set to true.
2913  */
2914 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2915                                                 bool bug_if_fake_rw)
2916 {
2917         if (sdebug_fake_rw) {
2918                 BUG_ON(bug_if_fake_rw); /* See note above */
2919                 return NULL;
2920         }
2921         return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2922 }
2923
/*
 * Copy data between the command's scatter-gather list and the backing
 * store, wrapping around to the start of the store if the access runs past
 * sdebug_store_sectors.  Returns number of bytes copied or -1 if error.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* Nothing to transfer, or no store to transfer with (fake_rw) */
	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* do_div() reduces lba in place; block = lba % sdebug_store_sectors */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* Wrapped: transfer the remainder from the store's start */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2966
2967 /* Returns number of bytes copied or -1 if error. */
2968 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2969 {
2970         struct scsi_data_buffer *sdb = &scp->sdb;
2971
2972         if (!sdb->length)
2973                 return 0;
2974         if (scp->sc_data_direction != DMA_TO_DEVICE)
2975                 return -1;
2976         return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2977                               num * sdebug_sector_size, 0, true);
2978 }
2979
2980 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2981  * arr into sip->storep+lba and return true. If comparison fails then
2982  * return false. */
2983 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2984                               const u8 *arr, bool compare_only)
2985 {
2986         bool res;
2987         u64 block, rest = 0;
2988         u32 store_blks = sdebug_store_sectors;
2989         u32 lb_size = sdebug_sector_size;
2990         u8 *fsp = sip->storep;
2991
2992         block = do_div(lba, store_blks);
2993         if (block + num > store_blks)
2994                 rest = block + num - store_blks;
2995
2996         res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2997         if (!res)
2998                 return res;
2999         if (rest)
3000                 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3001                              rest * lb_size);
3002         if (!res)
3003                 return res;
3004         if (compare_only)
3005                 return true;
3006         arr += num * lb_size;
3007         memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3008         if (rest)
3009                 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3010         return res;
3011 }
3012
3013 static __be16 dif_compute_csum(const void *buf, int len)
3014 {
3015         __be16 csum;
3016
3017         if (sdebug_guard)
3018                 csum = (__force __be16)ip_compute_csum(buf, len);
3019         else
3020                 csum = cpu_to_be16(crc_t10dif(buf, len));
3021
3022         return csum;
3023 }
3024
3025 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3026                       sector_t sector, u32 ei_lba)
3027 {
3028         __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3029
3030         if (sdt->guard_tag != csum) {
3031                 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3032                         (unsigned long)sector,
3033                         be16_to_cpu(sdt->guard_tag),
3034                         be16_to_cpu(csum));
3035                 return 0x01;
3036         }
3037         if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3038             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3039                 pr_err("REF check failed on sector %lu\n",
3040                         (unsigned long)sector);
3041                 return 0x03;
3042         }
3043         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3044             be32_to_cpu(sdt->ref_tag) != ei_lba) {
3045                 pr_err("REF check failed on sector %lu\n",
3046                         (unsigned long)sector);
3047                 return 0x03;
3048         }
3049         return 0;
3050 }
3051
/*
 * Copy protection information (PI tuples) for @sectors sectors starting at
 * @sector between the command's protection scatter-gather list and the
 * dif_storep array: store -> sgl when @read is true, sgl -> store
 * otherwise.  The PI store is circular, so a chunk may wrap back to the
 * start of dif_storep (the 'rest' bytes below).
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* Portion of this chunk that wraps past the store's end */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* Wrapped part continues at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3097
/*
 * Verify the stored protection information for @sectors sectors starting
 * at @start_sec against the stored data, then copy the PI tuples into the
 * command's protection sgl.  Returns 0 on success or a dif_verify() error
 * code (used as the sense ASCQ by the caller) on a failed check.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* An app tag of 0xffff means "do not check this sector" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
				 ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
3129
/*
 * Handler for READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Decodes the cdb, validates the access, optionally verifies DIX/DIF
 * protection information and copies data from the backing store into the
 * command's buffer.  Also implements the optional injected medium, short
 * transfer and recovered/DIF/DIX errors.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	/* With no store (fake_rw) fall back to the global lock */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* length 0 => 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Optionally inject a short transfer (half the requested length) */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Optionally inject an unrecovered read error in a configured range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Optionally report an injected recovered/DIF/DIX error */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3256
/*
 * Log a hex/ASCII dump of @len bytes at @buf via pr_err(), 16 bytes per
 * line; printable characters are shown as " c ", others as two hex digits.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int row, col, n;
	char line[128];

	pr_err(">>> Sector Dump <<<\n");
	for (row = 0; row < len; row += 16) {
		n = 0;
		for (col = 0; col < 16; col++) {
			unsigned char c = buf[row + col];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(line + n, sizeof(line) - n,
					       " %c ", buf[row + col]);
			else
				n += scnprintf(line + n, sizeof(line) - n,
					       "%02x ", buf[row + col]);
		}
		pr_err("%04d: %s\n", row, line);
	}
}
3278
/*
 * Verify the protection information supplied with a write against the data
 * being written, walking the protection and data scatter-gather lists in
 * lock step.  On success the PI tuples are copied into the PI store via
 * dif_copy_prot().  Returns 0 on success, or 0x01/0x03 (dif_verify()
 * convention) on a failed check; the offending sector is dumped to the log.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			/* Data sgl exhausted before the protection sgl */
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3350
3351 static unsigned long lba_to_map_index(sector_t lba)
3352 {
3353         if (sdebug_unmap_alignment)
3354                 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3355         sector_div(lba, sdebug_unmap_granularity);
3356         return lba;
3357 }
3358
3359 static sector_t map_index_to_lba(unsigned long index)
3360 {
3361         sector_t lba = index * sdebug_unmap_granularity;
3362
3363         if (sdebug_unmap_alignment)
3364                 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3365         return lba;
3366 }
3367
3368 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3369                               unsigned int *num)
3370 {
3371         sector_t end;
3372         unsigned int mapped;
3373         unsigned long index;
3374         unsigned long next;
3375
3376         index = lba_to_map_index(lba);
3377         mapped = test_bit(index, sip->map_storep);
3378
3379         if (mapped)
3380                 next = find_next_zero_bit(sip->map_storep, map_size, index);
3381         else
3382                 next = find_next_bit(sip->map_storep, map_size, index);
3383
3384         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3385         *num = end - lba;
3386         return mapped;
3387 }
3388
3389 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3390                        unsigned int len)
3391 {
3392         sector_t end = lba + len;
3393
3394         while (lba < end) {
3395                 unsigned long index = lba_to_map_index(lba);
3396
3397                 if (index < map_size)
3398                         set_bit(index, sip->map_storep);
3399
3400                 lba = map_index_to_lba(index + 1);
3401         }
3402 }
3403
/*
 * Clear the provisioning map bits for the unmap granules fully covered by
 * [lba, lba + len), scrubbing the corresponding data (to 0 or 0xff
 * depending on the LBPRZ setting) and invalidating the PI store with 0xff
 * (an all-ones app tag is skipped by prot_verify_read()).  Granules only
 * partially covered by the range stay mapped.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* Only unmap granules entirely inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3432
3433 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3434 {
3435         bool check_prot;
3436         u32 num;
3437         u32 ei_lba;
3438         int ret;
3439         u64 lba;
3440         struct sdeb_store_info *sip = devip2sip(devip, true);
3441         rwlock_t *macc_lckp = &sip->macc_lck;
3442         u8 *cmd = scp->cmnd;
3443
3444         switch (cmd[0]) {
3445         case WRITE_16:
3446                 ei_lba = 0;
3447                 lba = get_unaligned_be64(cmd + 2);
3448                 num = get_unaligned_be32(cmd + 10);
3449                 check_prot = true;
3450                 break;
3451         case WRITE_10:
3452                 ei_lba = 0;
3453                 lba = get_unaligned_be32(cmd + 2);
3454                 num = get_unaligned_be16(cmd + 7);
3455                 check_prot = true;
3456                 break;
3457         case WRITE_6:
3458                 ei_lba = 0;
3459                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3460                       (u32)(cmd[1] & 0x1f) << 16;
3461                 num = (0 == cmd[4]) ? 256 : cmd[4];
3462                 check_prot = true;
3463                 break;
3464         case WRITE_12:
3465                 ei_lba = 0;
3466                 lba = get_unaligned_be32(cmd + 2);
3467                 num = get_unaligned_be32(cmd + 6);
3468                 check_prot = true;
3469                 break;
3470         case 0x53:      /* XDWRITEREAD(10) */
3471                 ei_lba = 0;
3472                 lba = get_unaligned_be32(cmd + 2);
3473                 num = get_unaligned_be16(cmd + 7);
3474                 check_prot = false;
3475                 break;
3476         default:        /* assume WRITE(32) */
3477                 lba = get_unaligned_be64(cmd + 12);
3478                 ei_lba = get_unaligned_be32(cmd + 20);
3479                 num = get_unaligned_be32(cmd + 28);
3480                 check_prot = false;
3481                 break;
3482         }
3483         if (unlikely(have_dif_prot && check_prot)) {
3484                 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3485                     (cmd[1] & 0xe0)) {
3486                         mk_sense_invalid_opcode(scp);
3487                         return check_condition_result;
3488                 }
3489                 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3490                      sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3491                     (cmd[1] & 0xe0) == 0)
3492                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3493                                     "to DIF device\n");
3494         }
3495
3496         write_lock(macc_lckp);
3497         ret = check_device_access_params(scp, lba, num, true);
3498         if (ret) {
3499                 write_unlock(macc_lckp);
3500                 return ret;
3501         }
3502
3503         /* DIX + T10 DIF */
3504         if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3505                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3506
3507                 if (prot_ret) {
3508                         write_unlock(macc_lckp);
3509                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3510                         return illegal_condition_result;
3511                 }
3512         }
3513
3514         ret = do_device_access(sip, scp, 0, lba, num, true);
3515         if (unlikely(scsi_debug_lbp()))
3516                 map_region(sip, lba, num);
3517         /* If ZBC zone then bump its write pointer */
3518         if (sdebug_dev_is_zoned(devip))
3519                 zbc_inc_wp(devip, lba, num);
3520         write_unlock(macc_lckp);
3521         if (unlikely(-1 == ret))
3522                 return DID_ERROR << 16;
3523         else if (unlikely(sdebug_verbose &&
3524                           (ret < (num * sdebug_sector_size))))
3525                 sdev_printk(KERN_INFO, scp->device,
3526                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3527                             my_name, num * sdebug_sector_size, ret);
3528
3529         if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3530                      atomic_read(&sdeb_inject_pending))) {
3531                 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3532                         mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3533                         atomic_set(&sdeb_inject_pending, 0);
3534                         return check_condition_result;
3535                 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3536                         /* Logical block guard check failed */
3537                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3538                         atomic_set(&sdeb_inject_pending, 0);
3539                         return illegal_condition_result;
3540                 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3541                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3542                         atomic_set(&sdeb_inject_pending, 0);
3543                         return illegal_condition_result;
3544                 }
3545         }
3546         return 0;
3547 }
3548
3549 /*
3550  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3551  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3552  */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* buffer holding header + LBA range descriptors */
	u8 *up;			/* cursor over the LBA range descriptors */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	/* Decode the CDB; WRITE SCATTERED(32) uses the variable length
	 * opcode, otherwise this is WRITE SCATTERED(16). */
	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;	/* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	/* The LB Data Offset is in logical blocks; the header plus all
	 * descriptors must fit in front of the first data block. */
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	sg_off = lbdof_blen;	/* data starts right after the descriptors */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* One pass per LBA range descriptor; 'up' starts past the header. */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		/* The 32-byte variant carries an expected initial LBA for PI */
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* Optional one-shot error injection (cleared once fired) */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
3715
3716 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3717                            u32 ei_lba, bool unmap, bool ndob)
3718 {
3719         struct scsi_device *sdp = scp->device;
3720         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3721         unsigned long long i;
3722         u64 block, lbaa;
3723         u32 lb_size = sdebug_sector_size;
3724         int ret;
3725         struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3726                                                 scp->device->hostdata, true);
3727         rwlock_t *macc_lckp = &sip->macc_lck;
3728         u8 *fs1p;
3729         u8 *fsp;
3730
3731         write_lock(macc_lckp);
3732
3733         ret = check_device_access_params(scp, lba, num, true);
3734         if (ret) {
3735                 write_unlock(macc_lckp);
3736                 return ret;
3737         }
3738
3739         if (unmap && scsi_debug_lbp()) {
3740                 unmap_region(sip, lba, num);
3741                 goto out;
3742         }
3743         lbaa = lba;
3744         block = do_div(lbaa, sdebug_store_sectors);
3745         /* if ndob then zero 1 logical block, else fetch 1 logical block */
3746         fsp = sip->storep;
3747         fs1p = fsp + (block * lb_size);
3748         if (ndob) {
3749                 memset(fs1p, 0, lb_size);
3750                 ret = 0;
3751         } else
3752                 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3753
3754         if (-1 == ret) {
3755                 write_unlock(&sip->macc_lck);
3756                 return DID_ERROR << 16;
3757         } else if (sdebug_verbose && !ndob && (ret < lb_size))
3758                 sdev_printk(KERN_INFO, scp->device,
3759                             "%s: %s: lb size=%u, IO sent=%d bytes\n",
3760                             my_name, "write same", lb_size, ret);
3761
3762         /* Copy first sector to remaining blocks */
3763         for (i = 1 ; i < num ; i++) {
3764                 lbaa = lba + i;
3765                 block = do_div(lbaa, sdebug_store_sectors);
3766                 memmove(fsp + (block * lb_size), fs1p, lb_size);
3767         }
3768         if (scsi_debug_lbp())
3769                 map_region(sip, lba, num);
3770         /* If ZBC zone then bump its write pointer */
3771         if (sdebug_dev_is_zoned(devip))
3772                 zbc_inc_wp(devip, lba, num);
3773 out:
3774         write_unlock(macc_lckp);
3775
3776         return 0;
3777 }
3778
3779 static int resp_write_same_10(struct scsi_cmnd *scp,
3780                               struct sdebug_dev_info *devip)
3781 {
3782         u8 *cmd = scp->cmnd;
3783         u32 lba;
3784         u16 num;
3785         u32 ei_lba = 0;
3786         bool unmap = false;
3787
3788         if (cmd[1] & 0x8) {
3789                 if (sdebug_lbpws10 == 0) {
3790                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3791                         return check_condition_result;
3792                 } else
3793                         unmap = true;
3794         }
3795         lba = get_unaligned_be32(cmd + 2);
3796         num = get_unaligned_be16(cmd + 7);
3797         if (num > sdebug_write_same_length) {
3798                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3799                 return check_condition_result;
3800         }
3801         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3802 }
3803
3804 static int resp_write_same_16(struct scsi_cmnd *scp,
3805                               struct sdebug_dev_info *devip)
3806 {
3807         u8 *cmd = scp->cmnd;
3808         u64 lba;
3809         u32 num;
3810         u32 ei_lba = 0;
3811         bool unmap = false;
3812         bool ndob = false;
3813
3814         if (cmd[1] & 0x8) {     /* UNMAP */
3815                 if (sdebug_lbpws == 0) {
3816                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3817                         return check_condition_result;
3818                 } else
3819                         unmap = true;
3820         }
3821         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3822                 ndob = true;
3823         lba = get_unaligned_be64(cmd + 2);
3824         num = get_unaligned_be32(cmd + 10);
3825         if (num > sdebug_write_same_length) {
3826                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3827                 return check_condition_result;
3828         }
3829         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3830 }
3831
3832 /* Note the mode field is in the same position as the (lower) service action
3833  * field. For the Report supported operation codes command, SPC-4 suggests
3834  * each mode of this command should be reported separately; for future. */
3835 static int resp_write_buffer(struct scsi_cmnd *scp,
3836                              struct sdebug_dev_info *devip)
3837 {
3838         u8 *cmd = scp->cmnd;
3839         struct scsi_device *sdp = scp->device;
3840         struct sdebug_dev_info *dp;
3841         u8 mode;
3842
3843         mode = cmd[1] & 0x1f;
3844         switch (mode) {
3845         case 0x4:       /* download microcode (MC) and activate (ACT) */
3846                 /* set UAs on this device only */
3847                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3848                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3849                 break;
3850         case 0x5:       /* download MC, save and ACT */
3851                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3852                 break;
3853         case 0x6:       /* download MC with offsets and ACT */
3854                 /* set UAs on most devices (LUs) in this target */
3855                 list_for_each_entry(dp,
3856                                     &devip->sdbg_host->dev_info_list,
3857                                     dev_list)
3858                         if (dp->target == sdp->id) {
3859                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3860                                 if (devip != dp)
3861                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3862                                                 dp->uas_bm);
3863                         }
3864                 break;
3865         case 0x7:       /* download MC with offsets, save, and ACT */
3866                 /* set UA on all devices (LUs) in this target */
3867                 list_for_each_entry(dp,
3868                                     &devip->sdbg_host->dev_info_list,
3869                                     dev_list)
3870                         if (dp->target == sdp->id)
3871                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3872                                         dp->uas_bm);
3873                 break;
3874         default:
3875                 /* do nothing for this command for other mode values */
3876                 break;
3877         }
3878         return 0;
3879 }
3880
3881 static int resp_comp_write(struct scsi_cmnd *scp,
3882                            struct sdebug_dev_info *devip)
3883 {
3884         u8 *cmd = scp->cmnd;
3885         u8 *arr;
3886         struct sdeb_store_info *sip = devip2sip(devip, true);
3887         rwlock_t *macc_lckp = &sip->macc_lck;
3888         u64 lba;
3889         u32 dnum;
3890         u32 lb_size = sdebug_sector_size;
3891         u8 num;
3892         int ret;
3893         int retval = 0;
3894
3895         lba = get_unaligned_be64(cmd + 2);
3896         num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3897         if (0 == num)
3898                 return 0;       /* degenerate case, not an error */
3899         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3900             (cmd[1] & 0xe0)) {
3901                 mk_sense_invalid_opcode(scp);
3902                 return check_condition_result;
3903         }
3904         if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3905              sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3906             (cmd[1] & 0xe0) == 0)
3907                 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3908                             "to DIF device\n");
3909         ret = check_device_access_params(scp, lba, num, false);
3910         if (ret)
3911                 return ret;
3912         dnum = 2 * num;
3913         arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3914         if (NULL == arr) {
3915                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3916                                 INSUFF_RES_ASCQ);
3917                 return check_condition_result;
3918         }
3919
3920         write_lock(macc_lckp);
3921
3922         ret = do_dout_fetch(scp, dnum, arr);
3923         if (ret == -1) {
3924                 retval = DID_ERROR << 16;
3925                 goto cleanup;
3926         } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3927                 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3928                             "indicated=%u, IO sent=%d bytes\n", my_name,
3929                             dnum * lb_size, ret);
3930         if (!comp_write_worker(sip, lba, num, arr, false)) {
3931                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3932                 retval = check_condition_result;
3933                 goto cleanup;
3934         }
3935         if (scsi_debug_lbp())
3936                 map_region(sip, lba, num);
3937 cleanup:
3938         write_unlock(macc_lckp);
3939         kfree(arr);
3940         return retval;
3941 }
3942
/* One UNMAP parameter-list block descriptor as it appears on the wire
 * (big-endian fields); resp_unmap() overlays this on the data-out buf. */
struct unmap_block_desc {
	__be64	lba;		/* first LBA to unmap */
	__be32	blocks;		/* number of logical blocks to unmap */
	__be32	__reserved;
};
3948
3949 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3950 {
3951         unsigned char *buf;
3952         struct unmap_block_desc *desc;
3953         struct sdeb_store_info *sip = devip2sip(devip, true);
3954         rwlock_t *macc_lckp = &sip->macc_lck;
3955         unsigned int i, payload_len, descriptors;
3956         int ret;
3957
3958         if (!scsi_debug_lbp())
3959                 return 0;       /* fib and say its done */
3960         payload_len = get_unaligned_be16(scp->cmnd + 7);
3961         BUG_ON(scsi_bufflen(scp) != payload_len);
3962
3963         descriptors = (payload_len - 8) / 16;
3964         if (descriptors > sdebug_unmap_max_desc) {
3965                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3966                 return check_condition_result;
3967         }
3968
3969         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3970         if (!buf) {
3971                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3972                                 INSUFF_RES_ASCQ);
3973                 return check_condition_result;
3974         }
3975
3976         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3977
3978         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3979         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3980
3981         desc = (void *)&buf[8];
3982
3983         write_lock(macc_lckp);
3984
3985         for (i = 0 ; i < descriptors ; i++) {
3986                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3987                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3988
3989                 ret = check_device_access_params(scp, lba, num, true);
3990                 if (ret)
3991                         goto out;
3992
3993                 unmap_region(sip, lba, num);
3994         }
3995
3996         ret = 0;
3997
3998 out:
3999         write_unlock(macc_lckp);
4000         kfree(buf);
4001
4002         return ret;
4003 }
4004
4005 #define SDEBUG_GET_LBA_STATUS_LEN 32
4006
4007 static int resp_get_lba_status(struct scsi_cmnd *scp,
4008                                struct sdebug_dev_info *devip)
4009 {
4010         u8 *cmd = scp->cmnd;
4011         u64 lba;
4012         u32 alloc_len, mapped, num;
4013         int ret;
4014         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4015
4016         lba = get_unaligned_be64(cmd + 2);
4017         alloc_len = get_unaligned_be32(cmd + 10);
4018
4019         if (alloc_len < 24)
4020                 return 0;
4021
4022         ret = check_device_access_params(scp, lba, 1, false);
4023         if (ret)
4024                 return ret;
4025
4026         if (scsi_debug_lbp()) {
4027                 struct sdeb_store_info *sip = devip2sip(devip, true);
4028
4029                 mapped = map_state(sip, lba, &num);
4030         } else {
4031                 mapped = 1;
4032                 /* following just in case virtual_gb changed */
4033                 sdebug_capacity = get_sdebug_capacity();
4034                 if (sdebug_capacity - lba <= 0xffffffff)
4035                         num = sdebug_capacity - lba;
4036                 else
4037                         num = 0xffffffff;
4038         }
4039
4040         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4041         put_unaligned_be32(20, arr);            /* Parameter Data Length */
4042         put_unaligned_be64(lba, arr + 8);       /* LBA */
4043         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
4044         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
4045
4046         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4047 }
4048
4049 static int resp_sync_cache(struct scsi_cmnd *scp,
4050                            struct sdebug_dev_info *devip)
4051 {
4052         int res = 0;
4053         u64 lba;
4054         u32 num_blocks;
4055         u8 *cmd = scp->cmnd;
4056
4057         if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
4058                 lba = get_unaligned_be32(cmd + 2);
4059                 num_blocks = get_unaligned_be16(cmd + 7);
4060         } else {                                /* SYNCHRONIZE_CACHE(16) */
4061                 lba = get_unaligned_be64(cmd + 2);
4062                 num_blocks = get_unaligned_be32(cmd + 10);
4063         }
4064         if (lba + num_blocks > sdebug_capacity) {
4065                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4066                 return check_condition_result;
4067         }
4068         if (!write_since_sync || (cmd[1] & 0x2))
4069                 res = SDEG_RES_IMMED_MASK;
4070         else            /* delay if write_since_sync and IMMED clear */
4071                 write_since_sync = false;
4072         return res;
4073 }
4074
4075 /*
4076  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4077  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4078  * a GOOD status otherwise. Model a disk with a big cache and yield
4079  * CONDITION MET. Actually tries to bring range in main memory into the
4080  * cache associated with the CPU(s).
4081  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;	/* rest: blocks wrapping past store end */
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;	/* no backing store; nothing to prefetch */
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div() sets lba to the quotient and returns the remainder,
	 * i.e. block is the offset within the (possibly smaller) store */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	read_lock(macc_lckp);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)	/* wrapped portion starts at the store's beginning */
		prefetch_range(fsp, rest * sdebug_sector_size);
	read_unlock(macc_lckp);
fini:
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
4124
4125 #define RL_BUCKET_ELEMS 8
4126
4127 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4128  * (W-LUN), the normal Linux scanning logic does not associate it with a
4129  * device (e.g. /dev/sg7). The following magic will make that association:
4130  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4131  * where <n> is a host number. If there are multiple targets in a host then
4132  * the above will associate a W-LUN to each target. To only get a W-LUN
4133  * for target 2, then use "echo '- 2 49409' > scan" .
4134  */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;	/* cursor within arr; also carries the slot
				 * for the trailing W-LUN after the loop */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;	/* running offset into the response */
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	/* Emit the response in buckets of RL_BUCKET_ELEMS entries; bucket
	 * k==0 spends its first slot (j starts at 1) on the 8 byte header. */
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;	/* partial bucket; exits outer loop */
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;
		/* full bucket: flush it and continue with the next one */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* the W-LUN (if requested) goes in the slot lun_p points at after
	 * the loop broke out of the last (partial) bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4227
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		/* BYTCHK=2 is reserved */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	/* BYTCHK=3 sends only one block in the data-out buffer */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* vnum blocks: BYTCHK=3 replicates the single block below */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	read_lock(macc_lckp);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		/* replicate the single fetched block across all vnum slots */
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	read_unlock(macc_lckp);
	kfree(arr);
	return ret;
}
4304
4305 #define RZONES_DESC_HD 64
4306
/* Report zones depending on start LBA and reporting options */
4308 static int resp_report_zones(struct scsi_cmnd *scp,
4309                              struct sdebug_dev_info *devip)
4310 {
4311         unsigned int i, max_zones, rep_max_zones, nrz = 0;
4312         int ret = 0;
4313         u32 alloc_len, rep_opts, rep_len;
4314         bool partial;
4315         u64 lba, zs_lba;
4316         u8 *arr = NULL, *desc;
4317         u8 *cmd = scp->cmnd;
4318         struct sdeb_zone_state *zsp;
4319         struct sdeb_store_info *sip = devip2sip(devip, false);
4320         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4321
4322         if (!sdebug_dev_is_zoned(devip)) {
4323                 mk_sense_invalid_opcode(scp);
4324                 return check_condition_result;
4325         }
4326         zs_lba = get_unaligned_be64(cmd + 2);
4327         alloc_len = get_unaligned_be32(cmd + 10);
4328         if (alloc_len == 0)
4329                 return 0;       /* not an error */
4330         rep_opts = cmd[14] & 0x3f;
4331         partial = cmd[14] & 0x80;
4332
4333         if (zs_lba >= sdebug_capacity) {
4334                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4335                 return check_condition_result;
4336         }
4337
4338         max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4339         rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4340                             max_zones);
4341
4342         arr = kzalloc(alloc_len, GFP_ATOMIC);
4343         if (!arr) {
4344                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4345                                 INSUFF_RES_ASCQ);
4346                 return check_condition_result;
4347         }
4348
4349         read_lock(macc_lckp);
4350
4351         desc = arr + 64;
4352         for (i = 0; i < max_zones; i++) {
4353                 lba = zs_lba + devip->zsize * i;
4354                 if (lba > sdebug_capacity)
4355                         break;
4356                 zsp = zbc_zone(devip, lba);
4357                 switch (rep_opts) {
4358                 case 0x00:
4359                         /* All zones */
4360                         break;
4361                 case 0x01:
4362                         /* Empty zones */
4363                         if (zsp->z_cond != ZC1_EMPTY)
4364                                 continue;
4365                         break;
4366                 case 0x02:
4367                         /* Implicit open zones */
4368                         if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4369                                 continue;
4370                         break;
4371                 case 0x03:
4372                         /* Explicit open zones */
4373                         if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4374                                 continue;
4375                         break;
4376                 case 0x04:
4377                         /* Closed zones */
4378                         if (zsp->z_cond != ZC4_CLOSED)
4379                                 continue;
4380                         break;
4381                 case 0x05:
4382                         /* Full zones */
4383                         if (zsp->z_cond != ZC5_FULL)
4384                                 continue;
4385                         break;
4386                 case 0x06:
4387                 case 0x07:
4388                 case 0x10:
4389                         /*
4390                          * Read-only, offline, reset WP recommended are
4391                          * not emulated: no zones to report;
4392                          */
4393                         continue;
4394                 case 0x11:
4395                         /* non-seq-resource set */
4396                         if (!zsp->z_non_seq_resource)
4397                                 continue;
4398                         break;
4399                 case 0x3f:
4400                         /* Not write pointer (conventional) zones */
4401                         if (!zbc_zone_is_conv(zsp))
4402                                 continue;
4403                         break;
4404                 default:
4405                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
4406                                         INVALID_FIELD_IN_CDB, 0);
4407                         ret = check_condition_result;
4408                         goto fini;
4409                 }
4410
4411                 if (nrz < rep_max_zones) {
4412                         /* Fill zone descriptor */
4413                         desc[0] = zsp->z_type;
4414                         desc[1] = zsp->z_cond << 4;
4415                         if (zsp->z_non_seq_resource)
4416                                 desc[1] |= 1 << 1;
4417                         put_unaligned_be64((u64)zsp->z_size, desc + 8);
4418                         put_unaligned_be64((u64)zsp->z_start, desc + 16);
4419                         put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4420                         desc += 64;
4421                 }
4422
4423                 if (partial && nrz >= rep_max_zones)
4424                         break;
4425
4426                 nrz++;
4427         }
4428
4429         /* Report header */
4430         put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4431         put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4432
4433         rep_len = (unsigned long)desc - (unsigned long)arr;
4434         ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4435
4436 fini:
4437         read_unlock(macc_lckp);
4438         kfree(arr);
4439         return ret;
4440 }
4441
4442 /* Logic transplanted from tcmu-runner, file_zbc.c */
4443 static void zbc_open_all(struct sdebug_dev_info *devip)
4444 {
4445         struct sdeb_zone_state *zsp = &devip->zstate[0];
4446         unsigned int i;
4447
4448         for (i = 0; i < devip->nr_zones; i++, zsp++) {
4449                 if (zsp->z_cond == ZC4_CLOSED)
4450                         zbc_open_zone(devip, &devip->zstate[i], true);
4451         }
4452 }
4453
/*
 * Respond to the ZBC OPEN ZONE command: explicitly open either all closed
 * zones (ALL bit set in CDB byte 14) or the single zone whose start LBA is
 * given in the CDB.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	/* Zone state changes are made under the store's write lock */
	write_lock(macc_lckp);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	/* The zone ID must be a zone start LBA of a non-conventional zone */
	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* Already explicitly open or full: nothing to do, report success */
	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	/* Enforce the maximum number of explicitly open zones */
	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
4522
4523 static void zbc_close_all(struct sdebug_dev_info *devip)
4524 {
4525         unsigned int i;
4526
4527         for (i = 0; i < devip->nr_zones; i++)
4528                 zbc_close_zone(devip, &devip->zstate[i]);
4529 }
4530
4531 static int resp_close_zone(struct scsi_cmnd *scp,
4532                            struct sdebug_dev_info *devip)
4533 {
4534         int res = 0;
4535         u64 z_id;
4536         u8 *cmd = scp->cmnd;
4537         struct sdeb_zone_state *zsp;
4538         bool all = cmd[14] & 0x01;
4539         struct sdeb_store_info *sip = devip2sip(devip, false);
4540         rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4541
4542         if (!sdebug_dev_is_zoned(devip)) {
4543                 mk_sense_invalid_opcode(scp);
4544                 return check_condition_result;
4545         }
4546
4547         write_lock(macc_lckp);
4548
4549         if (all) {
4550                 zbc_close_all(devip);
4551                 goto fini;
4552         }
4553
4554         /* Close specified zone */
4555         z_id = get_unaligned_be64(cmd + 2);
4556         if (z_id >= sdebug_capacity) {
4557                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4558                 res = check_condition_result;
4559                 goto fini;
4560         }
4561
4562         zsp = zbc_zone(devip, z_id);
4563         if (z_id != zsp->z_start) {
4564                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4565                 res = check_condition_result;
4566                 goto fini;
4567         }
4568         if (zbc_zone_is_conv(zsp)) {
4569                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4570                 res = check_condition_result;
4571                 goto fini;
4572         }
4573
4574         zbc_close_zone(devip, zsp);
4575 fini:
4576         write_unlock(macc_lckp);
4577         return res;
4578 }
4579
4580 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4581                             struct sdeb_zone_state *zsp, bool empty)
4582 {
4583         enum sdebug_z_cond zc = zsp->z_cond;
4584
4585         if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4586             zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4587                 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4588                         zbc_close_zone(devip, zsp);
4589                 if (zsp->z_cond == ZC4_CLOSED)
4590                         devip->nr_closed--;
4591                 zsp->z_wp = zsp->z_start + zsp->z_size;
4592                 zsp->z_cond = ZC5_FULL;
4593         }
4594 }
4595
4596 static void zbc_finish_all(struct sdebug_dev_info *devip)
4597 {
4598         unsigned int i;
4599
4600         for (i = 0; i < devip->nr_zones; i++)
4601                 zbc_finish_zone(devip, &devip->zstate[i], false);
4602 }
4603
/*
 * Respond to the ZBC FINISH ZONE command: transition to the full condition
 * either all eligible zones (ALL bit in CDB byte 14) or the single zone
 * whose start LBA is given in the CDB.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	/* Zone state changes are made under the store's write lock */
	write_lock(macc_lckp);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	/* The zone ID must be a zone start LBA of a non-conventional zone */
	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* empty=true: an empty zone may also be finished */
	zbc_finish_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
4652
4653 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4654                          struct sdeb_zone_state *zsp)
4655 {
4656         enum sdebug_z_cond zc;
4657         struct sdeb_store_info *sip = devip2sip(devip, false);
4658
4659         if (zbc_zone_is_conv(zsp))
4660                 return;
4661
4662         zc = zsp->z_cond;
4663         if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4664                 zbc_close_zone(devip, zsp);
4665
4666         if (zsp->z_cond == ZC4_CLOSED)
4667                 devip->nr_closed--;
4668
4669         if (zsp->z_wp > zsp->z_start)
4670                 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4671                        (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4672
4673         zsp->z_non_seq_resource = false;
4674         zsp->z_wp = zsp->z_start;
4675         zsp->z_cond = ZC1_EMPTY;
4676 }
4677
4678 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4679 {
4680         unsigned int i;
4681
4682         for (i = 0; i < devip->nr_zones; i++)
4683                 zbc_rwp_zone(devip, &devip->zstate[i]);
4684 }
4685
/*
 * Respond to the ZBC RESET WRITE POINTER command: reset all zones (ALL bit
 * in CDB byte 14) or the single zone whose start LBA is given in the CDB.
 */
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	/* Resetting wipes store data, so take the write lock */
	write_lock(macc_lckp);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	/* The zone ID must be a zone start LBA of a non-conventional zone */
	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}
4732
4733 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4734 {
4735         u16 hwq;
4736         u32 tag = blk_mq_unique_tag(cmnd->request);
4737
4738         hwq = blk_mq_unique_tag_to_hwq(tag);
4739
4740         pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4741         if (WARN_ON_ONCE(hwq >= submit_queues))
4742                 hwq = 0;
4743
4744         return sdebug_q_arr + hwq;
4745 }
4746
/* Return the block layer's unique tag for @cmnd's request. */
static u32 get_tag(struct scsi_cmnd *cmnd)
{
	return blk_mq_unique_tag(cmnd->request);
}
4751
/*
 * Queued (deferred) command completions converge here, whether raised by
 * the hrtimer or the work queue path. Validates the queue slot, releases
 * it, maintains the retired_max_queue accounting and finally calls the
 * mid level's scsi_done() unless the command was aborted.
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* completing on another CPU than the issuer counts as a miss */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* Sanity check the slot index before touching qc_arr */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* retired_max_queue > 0 means the user shrank max_queue */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	/* Release the queue slot */
	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* lower retired_max_queue once the highest in-use slot allows */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
4825
4826 /* When high resolution timer goes off this function is called. */
4827 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4828 {
4829         struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4830                                                   hrt);
4831         sdebug_q_cmd_complete(sd_dp);
4832         return HRTIMER_NORESTART;
4833 }
4834
4835 /* When work queue schedules work, it calls this function. */
4836 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4837 {
4838         struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4839                                                   ew.work);
4840         sdebug_q_cmd_complete(sd_dp);
4841 }
4842
4843 static bool got_shared_uuid;
4844 static uuid_t shared_uuid;
4845
/*
 * Compute the zone geometry for a zoned @devip (zone size, shift, zone
 * count, conventional-zone count, max open zones) and allocate/initialize
 * the per-zone state array. Returns 0 on success or a negative errno.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		/* halve the zone size until at least 4 zones fit */
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	/* At least one sequential write zone must remain */
	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	/* Lay out the zones: conventional zones first, sequential ones after */
	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZONE_TYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
		} else {
			/* HM -> sequential write required, HA -> preferred */
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZONE_TYPE_SWR;
			else
				zsp->z_type = ZBC_ZONE_TYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
		}

		/* The last zone may be smaller than devip->zsize */
		if (zsp->z_start + devip->zsize < capacity)
			zsp->z_size = devip->zsize;
		else
			zsp->z_size = capacity - zsp->z_start;

		zstart += zsp->z_size;
	}

	return 0;
}
4931
4932 static struct sdebug_dev_info *sdebug_device_create(
4933                         struct sdebug_host_info *sdbg_host, gfp_t flags)
4934 {
4935         struct sdebug_dev_info *devip;
4936
4937         devip = kzalloc(sizeof(*devip), flags);
4938         if (devip) {
4939                 if (sdebug_uuid_ctl == 1)
4940                         uuid_gen(&devip->lu_name);
4941                 else if (sdebug_uuid_ctl == 2) {
4942                         if (got_shared_uuid)
4943                                 devip->lu_name = shared_uuid;
4944                         else {
4945                                 uuid_gen(&shared_uuid);
4946                                 got_shared_uuid = true;
4947                                 devip->lu_name = shared_uuid;
4948                         }
4949                 }
4950                 devip->sdbg_host = sdbg_host;
4951                 if (sdeb_zbc_in_use) {
4952                         devip->zmodel = sdeb_zbc_model;
4953                         if (sdebug_device_create_zones(devip)) {
4954                                 kfree(devip);
4955                                 return NULL;
4956                         }
4957                 } else {
4958                         devip->zmodel = BLK_ZONED_NONE;
4959                 }
4960                 devip->sdbg_host = sdbg_host;
4961                 devip->create_ts = ktime_get_boottime();
4962                 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4963                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4964         }
4965         return devip;
4966 }
4967
4968 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4969 {
4970         struct sdebug_host_info *sdbg_host;
4971         struct sdebug_dev_info *open_devip = NULL;
4972         struct sdebug_dev_info *devip;
4973
4974         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4975         if (!sdbg_host) {
4976                 pr_err("Host info NULL\n");
4977                 return NULL;
4978         }
4979
4980         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4981                 if ((devip->used) && (devip->channel == sdev->channel) &&
4982                     (devip->target == sdev->id) &&
4983                     (devip->lun == sdev->lun))
4984                         return devip;
4985                 else {
4986                         if ((!devip->used) && (!open_devip))
4987                                 open_devip = devip;
4988                 }
4989         }
4990         if (!open_devip) { /* try and make a new one */
4991                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4992                 if (!open_devip) {
4993                         pr_err("out of memory at line %d\n", __LINE__);
4994                         return NULL;
4995                 }
4996         }
4997
4998         open_devip->channel = sdev->channel;
4999         open_devip->target = sdev->id;
5000         open_devip->lun = sdev->lun;
5001         open_devip->sdbg_host = sdbg_host;
5002         atomic_set(&open_devip->num_in_q, 0);
5003         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5004         open_devip->used = true;
5005         return open_devip;
5006 }
5007
/* slave_alloc hook: only logs; per-LU state is built in slave_configure. */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	return 0;
}
5015
5016 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5017 {
5018         struct sdebug_dev_info *devip =
5019                         (struct sdebug_dev_info *)sdp->hostdata;
5020
5021         if (sdebug_verbose)
5022                 pr_info("slave_configure <%u %u %u %llu>\n",
5023                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5024         if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5025                 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5026         if (devip == NULL) {
5027                 devip = find_build_dev_info(sdp);
5028                 if (devip == NULL)
5029                         return 1;  /* no resources, will be marked offline */
5030         }
5031         sdp->hostdata = devip;
5032         if (sdebug_no_uld)
5033                 sdp->no_uld_attach = 1;
5034         config_cdb_len(sdp);
5035         return 0;
5036 }
5037
5038 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5039 {
5040         struct sdebug_dev_info *devip =
5041                 (struct sdebug_dev_info *)sdp->hostdata;
5042
5043         if (sdebug_verbose)
5044                 pr_info("slave_destroy <%u %u %u %llu>\n",
5045                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5046         if (devip) {
5047                 /* make this slot available for re-use */
5048                 devip->used = false;
5049                 sdp->hostdata = NULL;
5050         }
5051 }
5052
5053 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5054                            enum sdeb_defer_type defer_t)
5055 {
5056         if (!sd_dp)
5057                 return;
5058         if (defer_t == SDEB_DEFER_HRT)
5059                 hrtimer_cancel(&sd_dp->hrt);
5060         else if (defer_t == SDEB_DEFER_WQ)
5061                 cancel_work_sync(&sd_dp->ew.work);
5062 }
5063
/* If @cmnd is found, deletes its timer or work queue and returns true;
   else returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* Scan every submit queue for the slot holding @cmnd */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		/* include slots above max_queue that are still retiring */
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot and clear the deferral type under the lock */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/*
				 * Drop qc_lock before cancelling: the cancel
				 * waits for the completion callback, which
				 * itself takes qc_lock (sdebug_q_cmd_complete).
				 */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
5109
5110 /* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* walk every slot of every submission queue */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* record and reset the defer type under the
				 * lock; see stop_queued_cmnd() */
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* qc_lock must be dropped while cancelling the
				 * hrtimer/work item, then re-taken to continue
				 * scanning this queue */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
5148
5149 /* Free queued command memory on heap */
5150 static void free_all_queued(void)
5151 {
5152         int j, k;
5153         struct sdebug_queue *sqp;
5154         struct sdebug_queued_cmd *sqcp;
5155
5156         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5157                 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5158                         sqcp = &sqp->qc_arr[k];
5159                         kfree(sqcp->sd_dp);
5160                         sqcp->sd_dp = NULL;
5161                 }
5162         }
5163 }
5164
5165 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5166 {
5167         bool ok;
5168
5169         ++num_aborts;
5170         if (SCpnt) {
5171                 ok = stop_queued_cmnd(SCpnt);
5172                 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5173                         sdev_printk(KERN_INFO, SCpnt->device,
5174                                     "%s: command%s found\n", __func__,
5175                                     ok ? "" : " not");
5176         }
5177         return SUCCESS;
5178 }
5179
5180 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5181 {
5182         ++num_dev_resets;
5183         if (SCpnt && SCpnt->device) {
5184                 struct scsi_device *sdp = SCpnt->device;
5185                 struct sdebug_dev_info *devip =
5186                                 (struct sdebug_dev_info *)sdp->hostdata;
5187
5188                 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5189                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5190                 if (devip)
5191                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
5192         }
5193         return SUCCESS;
5194 }
5195
5196 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5197 {
5198         struct sdebug_host_info *sdbg_host;
5199         struct sdebug_dev_info *devip;
5200         struct scsi_device *sdp;
5201         struct Scsi_Host *hp;
5202         int k = 0;
5203
5204         ++num_target_resets;
5205         if (!SCpnt)
5206                 goto lie;
5207         sdp = SCpnt->device;
5208         if (!sdp)
5209                 goto lie;
5210         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5211                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5212         hp = sdp->host;
5213         if (!hp)
5214                 goto lie;
5215         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5216         if (sdbg_host) {
5217                 list_for_each_entry(devip,
5218                                     &sdbg_host->dev_info_list,
5219                                     dev_list)
5220                         if (devip->target == sdp->id) {
5221                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5222                                 ++k;
5223                         }
5224         }
5225         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5226                 sdev_printk(KERN_INFO, sdp,
5227                             "%s: %d device(s) found in target\n", __func__, k);
5228 lie:
5229         return SUCCESS;
5230 }
5231
5232 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5233 {
5234         struct sdebug_host_info *sdbg_host;
5235         struct sdebug_dev_info *devip;
5236         struct scsi_device *sdp;
5237         struct Scsi_Host *hp;
5238         int k = 0;
5239
5240         ++num_bus_resets;
5241         if (!(SCpnt && SCpnt->device))
5242                 goto lie;
5243         sdp = SCpnt->device;
5244         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5245                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5246         hp = sdp->host;
5247         if (hp) {
5248                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5249                 if (sdbg_host) {
5250                         list_for_each_entry(devip,
5251                                             &sdbg_host->dev_info_list,
5252                                             dev_list) {
5253                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5254                                 ++k;
5255                         }
5256                 }
5257         }
5258         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5259                 sdev_printk(KERN_INFO, sdp,
5260                             "%s: %d device(s) found in host\n", __func__, k);
5261 lie:
5262         return SUCCESS;
5263 }
5264
5265 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5266 {
5267         struct sdebug_host_info *sdbg_host;
5268         struct sdebug_dev_info *devip;
5269         int k = 0;
5270
5271         ++num_host_resets;
5272         if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5273                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5274         spin_lock(&sdebug_host_list_lock);
5275         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5276                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5277                                     dev_list) {
5278                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5279                         ++k;
5280                 }
5281         }
5282         spin_unlock(&sdebug_host_list_lock);
5283         stop_all_queued();
5284         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5285                 sdev_printk(KERN_INFO, SCpnt->device,
5286                             "%s: %d device(s) found\n", __func__, k);
5287         return SUCCESS;
5288 }
5289
/* Write an MBR-style partition table for sdebug_num_parts Linux partitions
 * into the first sector of @ramp.  No-op for stores smaller than 1 MiB. */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* first track (sdebug_sectors_per) is reserved for the MBR */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;	/* cylinder size */
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/* align each partition start down to a cylinder boundary; track the
	 * smallest resulting gap so all partitions get the same length */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel terminates loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);	/* 0x1be: MBR table offset */
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* CHS address of the first sector (sector field is 1-based) */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* CHS address of the last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		/* LBA start/length are little-endian on disk */
		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5342
5343 static void block_unblock_all_queues(bool block)
5344 {
5345         int j;
5346         struct sdebug_queue *sqp;
5347
5348         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5349                 atomic_set(&sqp->blocked, (int)block);
5350 }
5351
5352 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5353  * commands will be processed normally before triggers occur.
5354  */
5355 static void tweak_cmnd_count(void)
5356 {
5357         int count, modulo;
5358
5359         modulo = abs(sdebug_every_nth);
5360         if (modulo < 2)
5361                 return;
5362         block_unblock_all_queues(true);
5363         count = atomic_read(&sdebug_cmnd_count);
5364         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5365         block_unblock_all_queues(false);
5366 }
5367
5368 static void clear_queue_stats(void)
5369 {
5370         atomic_set(&sdebug_cmnd_count, 0);
5371         atomic_set(&sdebug_completions, 0);
5372         atomic_set(&sdebug_miss_cpus, 0);
5373         atomic_set(&sdebug_a_tsf, 0);
5374 }
5375
5376 static bool inject_on_this_cmd(void)
5377 {
5378         if (sdebug_every_nth == 0)
5379                 return false;
5380         return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5381 }
5382
5383 #define INCLUSIVE_TIMING_MAX_NS 1000000         /* 1 millisecond */
5384
5385 /* Complete the processing of the thread that queued a SCSI command to this
5386  * driver. It either completes the command by calling cmnd_done() or
5387  * schedules a hr timer or work queue then returns 0. Returns
5388  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5389  */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	bool new_sd_dp;
	bool inject = false;
	int k, num_in_q, qdepth;
	unsigned long iflags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	/* no device info: complete immediately, defaulting to no-connect */
	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* delay of 0 means respond synchronously in the caller's thread */
	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	/* simulated queue-full handling against the device's queue_depth */
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* optionally inject a rare TASK SET FULL near queue-full */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = true;
			scsi_result = device_qfull_result;
		}
	}

	/* claim a free slot in this queue's command array */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	/* first use of this slot: allocate its deferred-completion object */
	if (!sd_dp) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (!sd_dp) {
			atomic_dec(&devip->num_in_q);
			clear_bit(k, sqp->in_use_bm);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
		new_sd_dp = true;
	} else {
		new_sd_dp = false;
	}

	/* Set the hostwide tag */
	if (sdebug_host_max_queue)
		sd_dp->hc_idx = get_tag(cmnd);

	/* timestamp now so response-function time can count toward ndelay */
	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function asked for immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	/* optionally inject a one-shot transport error on a good command */
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		/* positive delay: complete later via a hrtimer */
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			/* with sdebug_random, pick a uniform delay in [0, ns);
			 * large values are scaled to 4 usec granularity first */
			if (sdebug_random && ns < U32_MAX) {
				ns = prandom_u32_max((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = prandom_u32_max((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* deadline already passed: unwind the
					 * slot and complete synchronously */
					spin_lock_irqsave(&sqp->qc_lock, iflags);
					sqcp->a_cmnd = NULL;
					atomic_dec(&devip->num_in_q);
					clear_bit(k, sqp->in_use_bm);
					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
					if (new_sd_dp)
						kfree(sd_dp);
					/* call scsi_done() from this thread */
					cmnd->scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		/* lazily initialize the hrtimer once per sdebug_defer */
		if (!sd_dp->init_hrt) {
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		/* schedule the invocation of scsi_done() for a later time */
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		/* lazily initialize the work item once per sdebug_defer */
		if (!sd_dp->init_wq) {
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		/* optionally mark this command for a simulated abort */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending)))
			sd_dp->aborted = true;
		schedule_work(&sd_dp->ew.work);
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
			blk_abort_request(cmnd->request);
			atomic_set(&sdeb_inject_pending, 0);
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
5592
5593 /* Note: The following macros create attribute files in the
5594    /sys/module/scsi_debug/parameters directory. Unfortunately this
5595    driver is unaware of a change and cannot trigger auxiliary actions
5596    as it can when the corresponding attribute in the
5597    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5598  */
/*
 * Permission conventions for the parameter files below: S_IRUGO (0444)
 * exposes the value read-only in sysfs; S_IRUGO | S_IWUSR (0644) also lets
 * root change it at run time (with the caveat noted above that the driver
 * is not told about such changes).
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5666
/* Module identity strings reported by modinfo(8). */
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);
5671
5672 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5673 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5674 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5675 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5676 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5677 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5678 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5679 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5680 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5681 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5682 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
/*
 * One-line usage string per module parameter, shown by `modinfo scsi_debug`.
 * Keep these in sync with the corresponding module_param() declarations and
 * their defaults. "def=" denotes the default when the parameter is omitted.
 */
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5735
5736 #define SDEBUG_INFO_LEN 256
5737 static char sdebug_info[SDEBUG_INFO_LEN];
5738
5739 static const char *scsi_debug_info(struct Scsi_Host *shp)
5740 {
5741         int k;
5742
5743         k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5744                       my_name, SDEBUG_VERSION, sdebug_version_date);
5745         if (k >= (SDEBUG_INFO_LEN - 1))
5746                 return sdebug_info;
5747         scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5748                   "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5749                   sdebug_dev_size_mb, sdebug_opts, submit_queues,
5750                   "statistics", (int)sdebug_statistics);
5751         return sdebug_info;
5752 }
5753
5754 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5755 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5756                                  int length)
5757 {
5758         char arr[16];
5759         int opts;
5760         int minLen = length > 15 ? 15 : length;
5761
5762         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5763                 return -EACCES;
5764         memcpy(arr, buffer, minLen);
5765         arr[minLen] = '\0';
5766         if (1 != sscanf(arr, "%d", &opts))
5767                 return -EINVAL;
5768         sdebug_opts = opts;
5769         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5770         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5771         if (sdebug_every_nth != 0)
5772                 tweak_cmnd_count();
5773         return length;
5774 }
5775
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	/* module-wide settings and counters first */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per-submit-queue busy range: lowest and highest in-use tags */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	/* dump host list and per-store array only when stores exist */
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		/* stores marked NOT_IN_USE are kept for possible re-use */
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
5849
5850 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5851 {
5852         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5853 }
5854 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5855  * of delay is jiffies.
5856  */
5857 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5858                            size_t count)
5859 {
5860         int jdelay, res;
5861
5862         if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5863                 res = count;
5864                 if (sdebug_jdelay != jdelay) {
5865                         int j, k;
5866                         struct sdebug_queue *sqp;
5867
5868                         block_unblock_all_queues(true);
5869                         for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5870                              ++j, ++sqp) {
5871                                 k = find_first_bit(sqp->in_use_bm,
5872                                                    sdebug_max_queue);
5873                                 if (k != sdebug_max_queue) {
5874                                         res = -EBUSY;   /* queued commands */
5875                                         break;
5876                                 }
5877                         }
5878                         if (res > 0) {
5879                                 sdebug_jdelay = jdelay;
5880                                 sdebug_ndelay = 0;
5881                         }
5882                         block_unblock_all_queues(false);
5883                 }
5884                 return res;
5885         }
5886         return -EINVAL;
5887 }
5888 static DRIVER_ATTR_RW(delay);
5889
5890 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5891 {
5892         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5893 }
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accept 0 <= ndelay < 10^9 (i.e. less than one second) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* freeze queues; refuse the change if any command is
			 * still queued on any submit queue */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
5930
5931 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5932 {
5933         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5934 }
5935
5936 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5937                           size_t count)
5938 {
5939         int opts;
5940         char work[20];
5941
5942         if (sscanf(buf, "%10s", work) == 1) {
5943                 if (strncasecmp(work, "0x", 2) == 0) {
5944                         if (kstrtoint(work + 2, 16, &opts) == 0)
5945                                 goto opts_done;
5946                 } else {
5947                         if (kstrtoint(work, 10, &opts) == 0)
5948                                 goto opts_done;
5949                 }
5950         }
5951         return -EINVAL;
5952 opts_done:
5953         sdebug_opts = opts;
5954         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5955         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5956         tweak_cmnd_count();
5957         return count;
5958 }
5959 static DRIVER_ATTR_RW(opts);
5960
5961 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5962 {
5963         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5964 }
5965 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5966                            size_t count)
5967 {
5968         int n;
5969
5970         /* Cannot change from or to TYPE_ZBC with sysfs */
5971         if (sdebug_ptype == TYPE_ZBC)
5972                 return -EINVAL;
5973
5974         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5975                 if (n == TYPE_ZBC)
5976                         return -EINVAL;
5977                 sdebug_ptype = n;
5978                 return count;
5979         }
5980         return -EINVAL;
5981 }
5982 static DRIVER_ATTR_RW(ptype);
5983
5984 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5985 {
5986         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5987 }
5988 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5989                             size_t count)
5990 {
5991         int n;
5992
5993         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5994                 sdebug_dsense = n;
5995                 return count;
5996         }
5997         return -EINVAL;
5998 }
5999 static DRIVER_ATTR_RW(dsense);
6000
6001 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6002 {
6003         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6004 }
/*
 * Toggling fake_rw has store-management side effects: turning it off (1->0)
 * (re-)establishes a single shared store and points every host at it;
 * turning it on (0->1) releases all stores apart from the first.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* revive the retained first store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					/* old per-host store becomes idle */
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
6048
6049 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6050 {
6051         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6052 }
6053 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6054                               size_t count)
6055 {
6056         int n;
6057
6058         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6059                 sdebug_no_lun_0 = n;
6060                 return count;
6061         }
6062         return -EINVAL;
6063 }
6064 static DRIVER_ATTR_RW(no_lun_0);
6065
6066 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6067 {
6068         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6069 }
6070 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6071                               size_t count)
6072 {
6073         int n;
6074
6075         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6076                 sdebug_num_tgts = n;
6077                 sdebug_max_tgts_luns();
6078                 return count;
6079         }
6080         return -EINVAL;
6081 }
6082 static DRIVER_ATTR_RW(num_tgts);
6083
6084 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6085 {
6086         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6087 }
6088 static DRIVER_ATTR_RO(dev_size_mb);
6089
6090 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6091 {
6092         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6093 }
6094
6095 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6096                                     size_t count)
6097 {
6098         bool v;
6099
6100         if (kstrtobool(buf, &v))
6101                 return -EINVAL;
6102
6103         sdebug_per_host_store = v;
6104         return count;
6105 }
6106 static DRIVER_ATTR_RW(per_host_store);
6107
6108 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6109 {
6110         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6111 }
6112 static DRIVER_ATTR_RO(num_parts);
6113
6114 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6115 {
6116         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6117 }
6118 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6119                                size_t count)
6120 {
6121         int nth;
6122         char work[20];
6123
6124         if (sscanf(buf, "%10s", work) == 1) {
6125                 if (strncasecmp(work, "0x", 2) == 0) {
6126                         if (kstrtoint(work + 2, 16, &nth) == 0)
6127                                 goto every_nth_done;
6128                 } else {
6129                         if (kstrtoint(work, 10, &nth) == 0)
6130                                 goto every_nth_done;
6131                 }
6132         }
6133         return -EINVAL;
6134
6135 every_nth_done:
6136         sdebug_every_nth = nth;
6137         if (nth && !sdebug_statistics) {
6138                 pr_info("every_nth needs statistics=1, set it\n");
6139                 sdebug_statistics = true;
6140         }
6141         tweak_cmnd_count();
6142         return count;
6143 }
6144 static DRIVER_ATTR_RW(every_nth);
6145
6146 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6147 {
6148         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6149 }
/*
 * Change the LUN address method (0 peripheral, 1 flat). On a real change,
 * raise a LUNS CHANGED unit attention on every simulated device so
 * initiators re-discover (only meaningful from SPC-3 onward).
 */
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk every device of every host under the list lock */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
6182
6183 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6184 {
6185         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6186 }
/*
 * Change the number of LUNs per target (capped at 256). On a real change,
 * raise a LUNS CHANGED unit attention on every simulated device (only
 * meaningful from SPC-3 onward).
 */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();		/* trigger mid-layer re-scan */
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk every device of every host under the list lock */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
6221
6222 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6223 {
6224         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6225 }
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	/* rejected while host_max_queue is active (queue depth fixed then) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		/* k := highest in-use tag across all submit queues */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		/* commands with tags in [n, k] are "retired": they complete
		 * but their slots will not be re-used */
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
6258
6259 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6260 {
6261         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6262 }
6263
6264 /*
6265  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6266  * in range [0, sdebug_host_max_queue), we can't change it.
6267  */
6268 static DRIVER_ATTR_RO(host_max_queue);
6269
6270 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6271 {
6272         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6273 }
6274 static DRIVER_ATTR_RO(no_uld);
6275
6276 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6277 {
6278         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6279 }
6280 static DRIVER_ATTR_RO(scsi_level);
6281
6282 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6283 {
6284         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6285 }
/*
 * Change the simulated capacity (in GiB; 0 reverts to dev_size_mb). On a
 * real change, raise a CAPACITY DATA HAS CHANGED unit attention on every
 * simulated device.
 */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk every device of every host under the list lock */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
6320
6321 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6322 {
6323         /* absolute number of hosts currently active is what is shown */
6324         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6325 }
6326
/*
 * Writing a positive number adds that many hosts; a negative number removes
 * that many. With per-host stores enabled, an idle (not-in-use) store is
 * re-used for a new host before a fresh store is allocated.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	/* per-host stores only apply when real (non-fake) I/O is in effect */
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* look for an idle store to re-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6364
6365 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6366 {
6367         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6368 }
6369 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6370                                     size_t count)
6371 {
6372         int n;
6373
6374         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6375                 sdebug_vpd_use_hostno = n;
6376                 return count;
6377         }
6378         return -EINVAL;
6379 }
6380 static DRIVER_ATTR_RW(vpd_use_hostno);
6381
6382 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6383 {
6384         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6385 }
6386 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6387                                 size_t count)
6388 {
6389         int n;
6390
6391         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6392                 if (n > 0)
6393                         sdebug_statistics = true;
6394                 else {
6395                         clear_queue_stats();
6396                         sdebug_statistics = false;
6397                 }
6398                 return count;
6399         }
6400         return -EINVAL;
6401 }
6402 static DRIVER_ATTR_RW(statistics);
6403
6404 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6405 {
6406         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6407 }
6408 static DRIVER_ATTR_RO(sector_size);
6409
6410 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6411 {
6412         return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6413 }
6414 static DRIVER_ATTR_RO(submit_queues);
6415
6416 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6417 {
6418         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6419 }
6420 static DRIVER_ATTR_RO(dix);
6421
6422 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6423 {
6424         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6425 }
6426 static DRIVER_ATTR_RO(dif);
6427
6428 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6429 {
6430         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6431 }
6432 static DRIVER_ATTR_RO(guard);
6433
6434 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6435 {
6436         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6437 }
6438 static DRIVER_ATTR_RO(ato);
6439
/*
 * Show the provisioning map of the first store as an LBA range list.
 * Without logical block provisioning the whole range is reported mapped.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* NOTE(review): only store index 0 is reported, even when several
	 * per-host stores exist */
	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	/* scnprintf() above was bounded to PAGE_SIZE - 1, so appending the
	 * newline and terminator here cannot overrun the page */
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
6461
6462 static ssize_t random_show(struct device_driver *ddp, char *buf)
6463 {
6464         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6465 }
6466
6467 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6468                             size_t count)
6469 {
6470         bool v;
6471
6472         if (kstrtobool(buf, &v))
6473                 return -EINVAL;
6474
6475         sdebug_random = v;
6476         return count;
6477 }
6478 static DRIVER_ATTR_RW(random);
6479
6480 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6481 {
6482         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6483 }
6484 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6485                                size_t count)
6486 {
6487         int n;
6488
6489         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6490                 sdebug_removable = (n > 0);
6491                 return count;
6492         }
6493         return -EINVAL;
6494 }
6495 static DRIVER_ATTR_RW(removable);
6496
6497 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6498 {
6499         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6500 }
6501 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6502 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6503                                size_t count)
6504 {
6505         int n;
6506
6507         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6508                 sdebug_host_lock = (n > 0);
6509                 return count;
6510         }
6511         return -EINVAL;
6512 }
6513 static DRIVER_ATTR_RW(host_lock);
6514
6515 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6516 {
6517         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6518 }
6519 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6520                             size_t count)
6521 {
6522         int n;
6523
6524         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6525                 sdebug_strict = (n > 0);
6526                 return count;
6527         }
6528         return -EINVAL;
6529 }
6530 static DRIVER_ATTR_RW(strict);
6531
6532 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6533 {
6534         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6535 }
6536 static DRIVER_ATTR_RO(uuid_ctl);
6537
6538 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6539 {
6540         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6541 }
6542 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6543                              size_t count)
6544 {
6545         int ret, n;
6546
6547         ret = kstrtoint(buf, 0, &n);
6548         if (ret)
6549                 return ret;
6550         sdebug_cdb_len = n;
6551         all_config_cdb_len();
6552         return count;
6553 }
6554 static DRIVER_ATTR_RW(cdb_len);
6555
/* Accepted spellings for the zbc= parameter: full model names ... */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

/* ... short model names ... */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

/* ... and numeric codes.  All three map onto the BLK_ZONED_* enum. */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6573
6574 static int sdeb_zbc_model_str(const char *cp)
6575 {
6576         int res = sysfs_match_string(zbc_model_strs_a, cp);
6577
6578         if (res < 0) {
6579                 res = sysfs_match_string(zbc_model_strs_b, cp);
6580                 if (res < 0) {
6581                         res = sysfs_match_string(zbc_model_strs_c, cp);
6582                         if (res < 0)
6583                                 return -EINVAL;
6584                 }
6585         }
6586         return res;
6587 }
6588
6589 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6590 {
6591         return scnprintf(buf, PAGE_SIZE, "%s\n",
6592                          zbc_model_strs_a[sdeb_zbc_model]);
6593 }
6594 static DRIVER_ATTR_RO(zbc);
6595
6596 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6597 {
6598         return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6599 }
6600 static DRIVER_ATTR_RO(tur_ms_to_ready);
6601
6602 /* Note: The following array creates attribute files in the
6603    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6604    files (over those found in the /sys/module/scsi_debug/parameters
6605    directory) is that auxiliary actions can be triggered when an attribute
6606    is changed. For example see: add_host_store() above.
6607  */
6608
/* sysfs files exported under /sys/bus/pseudo/drivers/scsi_debug */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,	/* sysfs requires a NULL-terminated array */
};
ATTRIBUTE_GROUPS(sdebug_drv);
6650
/* root device that parents all emulated "adapterN" devices */
static struct device *pseudo_primary;
6652
/*
 * Module/built-in initialization.  Validates module parameters, computes
 * the simulated capacity and CHS geometry, optionally allocates the first
 * backing store, registers the pseudo root device, bus and driver, then
 * creates the requested number of emulated hosts.  Returns 0 on success
 * or a negative errno; on error all partially acquired resources are
 * released via the goto chain at the bottom.
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	/* a valid ndelay (< 1 s) overrides the jiffies-based delay */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* only power-of-two sector sizes from 512 to 4096 are supported */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		/* more than 256 LUNs requires flat LUN addressing */
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	/* host_max_queue (when set) caps the per-queue depth */
	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		/* sanitize logical block provisioning parameters */
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		/* allocate the initial (possibly shared) backing store */
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			/* first host attaches to the store created above */
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);	/* no-op when idx == -1 */
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
6901
/*
 * Module teardown: stop queued commands, remove every emulated host,
 * unregister driver/bus/root device, then release all backing stores.
 */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	/* false: erase every store, including the first one */
	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	kfree(sdebug_q_arr);
}
6918
/* device_initcall: when built in, run after the driver core is ready */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
6921
/* Device release callback: frees the host structure that embeds @dev. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
6929
6930 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6931 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6932 {
6933         if (idx < 0)
6934                 return;
6935         if (!sip) {
6936                 if (xa_empty(per_store_ap))
6937                         return;
6938                 sip = xa_load(per_store_ap, idx);
6939                 if (!sip)
6940                         return;
6941         }
6942         vfree(sip->map_storep);
6943         vfree(sip->dif_storep);
6944         vfree(sip->storep);
6945         xa_erase(per_store_ap, idx);
6946         kfree(sip);
6947 }
6948
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		/* flag doubles as "skip the first entry" marker */
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	/*
	 * NOTE(review): still true here only when the xarray was empty;
	 * then reset most-recent to the (kept) first store's index.
	 */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
6964
6965 /*
6966  * Returns store xarray new element index (idx) if >=0 else negated errno.
6967  * Limit the number of stores to 65536.
6968  */
6969 static int sdebug_add_store(void)
6970 {
6971         int res;
6972         u32 n_idx;
6973         unsigned long iflags;
6974         unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6975         struct sdeb_store_info *sip = NULL;
6976         struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6977
6978         sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6979         if (!sip)
6980                 return -ENOMEM;
6981
6982         xa_lock_irqsave(per_store_ap, iflags);
6983         res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6984         if (unlikely(res < 0)) {
6985                 xa_unlock_irqrestore(per_store_ap, iflags);
6986                 kfree(sip);
6987                 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6988                 return res;
6989         }
6990         sdeb_most_recent_idx = n_idx;
6991         if (sdeb_first_idx < 0)
6992                 sdeb_first_idx = n_idx;
6993         xa_unlock_irqrestore(per_store_ap, iflags);
6994
6995         res = -ENOMEM;
6996         sip->storep = vzalloc(sz);
6997         if (!sip->storep) {
6998                 pr_err("user data oom\n");
6999                 goto err;
7000         }
7001         if (sdebug_num_parts > 0)
7002                 sdebug_build_parts(sip->storep, sz);
7003
7004         /* DIF/DIX: what T10 calls Protection Information (PI) */
7005         if (sdebug_dix) {
7006                 int dif_size;
7007
7008                 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7009                 sip->dif_storep = vmalloc(dif_size);
7010
7011                 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7012                         sip->dif_storep);
7013
7014                 if (!sip->dif_storep) {
7015                         pr_err("DIX oom\n");
7016                         goto err;
7017                 }
7018                 memset(sip->dif_storep, 0xff, dif_size);
7019         }
7020         /* Logical Block Provisioning */
7021         if (scsi_debug_lbp()) {
7022                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7023                 sip->map_storep = vmalloc(array_size(sizeof(long),
7024                                                      BITS_TO_LONGS(map_size)));
7025
7026                 pr_info("%lu provisioning blocks\n", map_size);
7027
7028                 if (!sip->map_storep) {
7029                         pr_err("LBP map oom\n");
7030                         goto err;
7031                 }
7032
7033                 bitmap_zero(sip->map_storep, map_size);
7034
7035                 /* Map first 1KB for partition table */
7036                 if (sdebug_num_parts)
7037                         map_region(sip, 0, 2);
7038         }
7039
7040         rwlock_init(&sip->macc_lck);
7041         return (int)n_idx;
7042 err:
7043         sdebug_erase_store((int)n_idx, sip);
7044         pr_warn("%s: failed, errno=%d\n", __func__, -res);
7045         return res;
7046 }
7047
7048 static int sdebug_add_host_helper(int per_host_idx)
7049 {
7050         int k, devs_per_host, idx;
7051         int error = -ENOMEM;
7052         struct sdebug_host_info *sdbg_host;
7053         struct sdebug_dev_info *sdbg_devinfo, *tmp;
7054
7055         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7056         if (!sdbg_host)
7057                 return -ENOMEM;
7058         idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7059         if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7060                 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7061         sdbg_host->si_idx = idx;
7062
7063         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7064
7065         devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7066         for (k = 0; k < devs_per_host; k++) {
7067                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7068                 if (!sdbg_devinfo)
7069                         goto clean;
7070         }
7071
7072         spin_lock(&sdebug_host_list_lock);
7073         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7074         spin_unlock(&sdebug_host_list_lock);
7075
7076         sdbg_host->dev.bus = &pseudo_lld_bus;
7077         sdbg_host->dev.parent = pseudo_primary;
7078         sdbg_host->dev.release = &sdebug_release_adapter;
7079         dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7080
7081         error = device_register(&sdbg_host->dev);
7082         if (error)
7083                 goto clean;
7084
7085         ++sdebug_num_hosts;
7086         return 0;
7087
7088 clean:
7089         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7090                                  dev_list) {
7091                 list_del(&sdbg_devinfo->dev_list);
7092                 kfree(sdbg_devinfo->zstate);
7093                 kfree(sdbg_devinfo);
7094         }
7095         kfree(sdbg_host);
7096         pr_warn("%s: failed, errno=%d\n", __func__, -error);
7097         return error;
7098 }
7099
7100 static int sdebug_do_add_host(bool mk_new_store)
7101 {
7102         int ph_idx = sdeb_most_recent_idx;
7103
7104         if (mk_new_store) {
7105                 ph_idx = sdebug_add_store();
7106                 if (ph_idx < 0)
7107                         return ph_idx;
7108         }
7109         return sdebug_add_host_helper(ph_idx);
7110 }
7111
/*
 * Remove the most recently added emulated host.  @the_end is true only
 * at module exit, when per-store "not in use" bookkeeping can be
 * skipped because every store is about to be erased anyway.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* take the last (most recently added) host on the list */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* mark the store unused only if no other host shares it */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7151
7152 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7153 {
7154         int num_in_q = 0;
7155         struct sdebug_dev_info *devip;
7156
7157         block_unblock_all_queues(true);
7158         devip = (struct sdebug_dev_info *)sdev->hostdata;
7159         if (NULL == devip) {
7160                 block_unblock_all_queues(false);
7161                 return  -ENODEV;
7162         }
7163         num_in_q = atomic_read(&devip->num_in_q);
7164
7165         if (qdepth < 1)
7166                 qdepth = 1;
7167         /* allow to exceed max host qc_arr elements for testing */
7168         if (qdepth > SDEBUG_CANQUEUE + 10)
7169                 qdepth = SDEBUG_CANQUEUE + 10;
7170         scsi_change_queue_depth(sdev, qdepth);
7171
7172         if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7173                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7174                             __func__, qdepth, num_in_q);
7175         }
7176         block_unblock_all_queues(false);
7177         return sdev->queue_depth;
7178 }
7179
7180 static bool fake_timeout(struct scsi_cmnd *scp)
7181 {
7182         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7183                 if (sdebug_every_nth < -1)
7184                         sdebug_every_nth = -1;
7185                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7186                         return true; /* ignore command causing timeout */
7187                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7188                          scsi_medium_access_command(scp))
7189                         return true; /* time out reads and writes */
7190         }
7191         return false;
7192 }
7193
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	/* stopped_state 2: becoming ready once tur_ms_to_ready elapses */
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* remaining time-to-ready, clamped to the full interval */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* stopped but not transitioning: a START UNIT is required */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
7236
7237 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7238                                    struct scsi_cmnd *scp)
7239 {
7240         u8 sdeb_i;
7241         struct scsi_device *sdp = scp->device;
7242         const struct opcode_info_t *oip;
7243         const struct opcode_info_t *r_oip;
7244         struct sdebug_dev_info *devip;
7245         u8 *cmd = scp->cmnd;
7246         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7247         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7248         int k, na;
7249         int errsts = 0;
7250         u64 lun_index = sdp->lun & 0x3FFF;
7251         u32 flags;
7252         u16 sa;
7253         u8 opcode = cmd[0];
7254         bool has_wlun_rl;
7255         bool inject_now;
7256
7257         scsi_set_resid(scp, 0);
7258         if (sdebug_statistics) {
7259                 atomic_inc(&sdebug_cmnd_count);
7260                 inject_now = inject_on_this_cmd();
7261         } else {
7262                 inject_now = false;
7263         }
7264         if (unlikely(sdebug_verbose &&
7265                      !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7266                 char b[120];
7267                 int n, len, sb;
7268
7269                 len = scp->cmd_len;
7270                 sb = (int)sizeof(b);
7271                 if (len > 32)
7272                         strcpy(b, "too long, over 32 bytes");
7273                 else {
7274                         for (k = 0, n = 0; k < len && n < sb; ++k)
7275                                 n += scnprintf(b + n, sb - n, "%02x ",
7276                                                (u32)cmd[k]);
7277                 }
7278                 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7279                             blk_mq_unique_tag(scp->request), b);
7280         }
7281         if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7282                 return SCSI_MLQUEUE_HOST_BUSY;
7283         has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7284         if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7285                 goto err_out;
7286
7287         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
7288         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
7289         devip = (struct sdebug_dev_info *)sdp->hostdata;
7290         if (unlikely(!devip)) {
7291                 devip = find_build_dev_info(sdp);
7292                 if (NULL == devip)
7293                         goto err_out;
7294         }
7295         if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7296                 atomic_set(&sdeb_inject_pending, 1);
7297
7298         na = oip->num_attached;
7299         r_pfp = oip->pfp;
7300         if (na) {       /* multiple commands with this opcode */
7301                 r_oip = oip;
7302                 if (FF_SA & r_oip->flags) {
7303                         if (F_SA_LOW & oip->flags)
7304                                 sa = 0x1f & cmd[1];
7305                         else
7306                                 sa = get_unaligned_be16(cmd + 8);
7307                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7308                                 if (opcode == oip->opcode && sa == oip->sa)
7309                                         break;
7310                         }
7311                 } else {   /* since no service action only check opcode */
7312                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7313                                 if (opcode == oip->opcode)
7314                                         break;
7315                         }
7316                 }
7317                 if (k > na) {
7318                         if (F_SA_LOW & r_oip->flags)
7319                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7320                         else if (F_SA_HIGH & r_oip->flags)
7321                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7322                         else
7323                                 mk_sense_invalid_opcode(scp);
7324                         goto check_cond;
7325                 }
7326         }       /* else (when na==0) we assume the oip is a match */
7327         flags = oip->flags;
7328         if (unlikely(F_INV_OP & flags)) {
7329                 mk_sense_invalid_opcode(scp);
7330                 goto check_cond;
7331         }
7332         if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7333                 if (sdebug_verbose)
7334                         sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7335                                     my_name, opcode, " supported for wlun");
7336                 mk_sense_invalid_opcode(scp);
7337                 goto check_cond;
7338         }
7339         if (unlikely(sdebug_strict)) {  /* check cdb against mask */
7340                 u8 rem;
7341                 int j;
7342
7343                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7344                         rem = ~oip->len_mask[k] & cmd[k];
7345                         if (rem) {
7346                                 for (j = 7; j >= 0; --j, rem <<= 1) {
7347                                         if (0x80 & rem)
7348                                                 break;
7349                                 }
7350                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7351                                 goto check_cond;
7352                         }
7353                 }
7354         }
7355         if (unlikely(!(F_SKIP_UA & flags) &&
7356                      find_first_bit(devip->uas_bm,
7357                                     SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7358                 errsts = make_ua(scp, devip);
7359                 if (errsts)
7360                         goto check_cond;
7361         }
7362         if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7363                      atomic_read(&devip->stopped))) {
7364                 errsts = resp_not_ready(scp, devip);
7365                 if (errsts)
7366                         goto fini;
7367         }
7368         if (sdebug_fake_rw && (F_FAKE_RW & flags))
7369                 goto fini;
7370         if (unlikely(sdebug_every_nth)) {
7371                 if (fake_timeout(scp))
7372                         return 0;       /* ignore command: make trouble */
7373         }
7374         if (likely(oip->pfp))
7375                 pfp = oip->pfp; /* calls a resp_* function */
7376         else
7377                 pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7378
7379 fini:
7380         if (F_DELAY_OVERR & flags)      /* cmds like INQUIRY respond asap */
7381                 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7382         else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7383                                             sdebug_ndelay > 10000)) {
7384                 /*
7385                  * Skip long delays if ndelay <= 10 microseconds. Otherwise
7386                  * for Start Stop Unit (SSU) want at least 1 second delay and
7387                  * if sdebug_jdelay>1 want a long delay of that many seconds.
7388                  * For Synchronize Cache want 1/20 of SSU's delay.
7389                  */
7390                 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7391                 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7392
7393                 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7394                 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7395         } else
7396                 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7397                                      sdebug_ndelay);
7398 check_cond:
7399         return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7400 err_out:
7401         return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7402 }
7403
/*
 * Host template for the emulated adapter, handed to scsi_host_alloc() in
 * sdebug_driver_probe().  Note that probe patches .can_queue (and, when
 * clustering is disabled, .dma_boundary) in this shared template before
 * each allocation.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,	/* overridden at probe time */
	.this_id =		7,			/* host's own SCSI id */
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,			/* no limit */
	.max_segment_size =	-1U,			/* no limit */
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
7430
7431 static int sdebug_driver_probe(struct device *dev)
7432 {
7433         int error = 0;
7434         struct sdebug_host_info *sdbg_host;
7435         struct Scsi_Host *hpnt;
7436         int hprot;
7437
7438         sdbg_host = to_sdebug_host(dev);
7439
7440         sdebug_driver_template.can_queue = sdebug_max_queue;
7441         if (!sdebug_clustering)
7442                 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7443
7444         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7445         if (NULL == hpnt) {
7446                 pr_err("scsi_host_alloc failed\n");
7447                 error = -ENODEV;
7448                 return error;
7449         }
7450         if (submit_queues > nr_cpu_ids) {
7451                 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7452                         my_name, submit_queues, nr_cpu_ids);
7453                 submit_queues = nr_cpu_ids;
7454         }
7455         /*
7456          * Decide whether to tell scsi subsystem that we want mq. The
7457          * following should give the same answer for each host.
7458          */
7459         hpnt->nr_hw_queues = submit_queues;
7460         if (sdebug_host_max_queue)
7461                 hpnt->host_tagset = 1;
7462
7463         sdbg_host->shost = hpnt;
7464         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7465         if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7466                 hpnt->max_id = sdebug_num_tgts + 1;
7467         else
7468                 hpnt->max_id = sdebug_num_tgts;
7469         /* = sdebug_max_luns; */
7470         hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7471
7472         hprot = 0;
7473
7474         switch (sdebug_dif) {
7475
7476         case T10_PI_TYPE1_PROTECTION:
7477                 hprot = SHOST_DIF_TYPE1_PROTECTION;
7478                 if (sdebug_dix)
7479                         hprot |= SHOST_DIX_TYPE1_PROTECTION;
7480                 break;
7481
7482         case T10_PI_TYPE2_PROTECTION:
7483                 hprot = SHOST_DIF_TYPE2_PROTECTION;
7484                 if (sdebug_dix)
7485                         hprot |= SHOST_DIX_TYPE2_PROTECTION;
7486                 break;
7487
7488         case T10_PI_TYPE3_PROTECTION:
7489                 hprot = SHOST_DIF_TYPE3_PROTECTION;
7490                 if (sdebug_dix)
7491                         hprot |= SHOST_DIX_TYPE3_PROTECTION;
7492                 break;
7493
7494         default:
7495                 if (sdebug_dix)
7496                         hprot |= SHOST_DIX_TYPE0_PROTECTION;
7497                 break;
7498         }
7499
7500         scsi_host_set_prot(hpnt, hprot);
7501
7502         if (have_dif_prot || sdebug_dix)
7503                 pr_info("host protection%s%s%s%s%s%s%s\n",
7504                         (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7505                         (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7506                         (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7507                         (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7508                         (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7509                         (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7510                         (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7511
7512         if (sdebug_guard == 1)
7513                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7514         else
7515                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7516
7517         sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7518         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7519         if (sdebug_every_nth)   /* need stats counters for every_nth */
7520                 sdebug_statistics = true;
7521         error = scsi_add_host(hpnt, &sdbg_host->dev);
7522         if (error) {
7523                 pr_err("scsi_add_host failed\n");
7524                 error = -ENODEV;
7525                 scsi_host_put(hpnt);
7526         } else {
7527                 scsi_scan_host(hpnt);
7528         }
7529
7530         return error;
7531 }
7532
7533 static int sdebug_driver_remove(struct device *dev)
7534 {
7535         struct sdebug_host_info *sdbg_host;
7536         struct sdebug_dev_info *sdbg_devinfo, *tmp;
7537
7538         sdbg_host = to_sdebug_host(dev);
7539
7540         if (!sdbg_host) {
7541                 pr_err("Unable to locate host info\n");
7542                 return -ENODEV;
7543         }
7544
7545         scsi_remove_host(sdbg_host->shost);
7546
7547         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7548                                  dev_list) {
7549                 list_del(&sdbg_devinfo->dev_list);
7550                 kfree(sdbg_devinfo->zstate);
7551                 kfree(sdbg_devinfo);
7552         }
7553
7554         scsi_host_put(sdbg_host->shost);
7555         return 0;
7556 }
7557
/* Bus match callback: every device on the pseudo bus matches every driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
7563
/*
 * Pseudo bus on which the emulated hosts live; probe/remove create and
 * destroy one SCSI host per bus device.
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};