// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */
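
/*
 * Illustrative sketch, not part of the original driver: a read-path
 * predicate restating the simulated medium-error window defined by the
 * two macros above. The helper name is hypothetical.
 */
static inline bool sdeb_in_medium_err_window(unsigned long long lba)
{
	return lba >= OPT_MEDIUM_ERR_ADDR &&
	       lba < OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM;
}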

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE (e.g. 3 * 64 = 192 commands on a
 * 64 bit machine).
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
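
/*
 * Illustrative sketch, not in the original driver: how a dispatcher
 * could use F_SA_LOW/F_SA_HIGH to pull the service action out of a
 * cdb, per the placement described in the flag comments above. The
 * helper name is hypothetical.
 */
static inline u16 sdeb_get_sa(const u8 *cdb, u32 flags)
{
	if (flags & F_SA_LOW)		/* SA in cdb byte 1, bits 4 to 0 */
		return cdb[1] & 0x1f;
	if (flags & F_SA_HIGH)		/* SA in cdb bytes 8 and 9 */
		return get_unaligned_be16(cdb + 8);
	return 0;			/* opcode carries no service action */
}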

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC5_FULL		= 0xe,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep;	/* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;	/* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
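
/*
 * Illustrative sketch, not in the original driver: how len_mask can
 * drive strict cdb checking. len_mask[0] holds the expected cdb
 * length; len_mask[1..15] hold, per cdb byte, the bits allowed to be
 * set. The helper name is hypothetical.
 */
static inline bool sdeb_cdb_bits_ok(const u8 *cdb, int cdb_len,
				    const struct opcode_info_t *oip)
{
	int k;

	if (cdb_len != oip->len_mask[0])
		return false;
	for (k = 1; k < cdb_len && k < 16; ++k) {
		if (cdb[k] & ~oip->len_mask[k])
			return false;	/* a reserved bit is set */
	}
	return true;
}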

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};

static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
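
/*
 * Illustrative sketch, not in the original driver: a response function
 * honouring an IMMED bit by OR-ing SDEG_RES_IMMED_MASK into its result
 * so the completion is not deferred. The handler is hypothetical; the
 * real handlers are declared below.
 */
static int resp_example_immed(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	bool immed = !!(scp->cmnd[1] & 0x1);	/* IMMED bit */
	int res = 0;				/* GOOD status */

	return immed ? (res | SDEG_RES_IMMED_MASK) : res;
}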

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },	/* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* REPORT ZONES */
};

/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,	/* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },

	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
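
/*
 * Illustrative sketch, not in the original driver: resolving a cdb to
 * its opcode_info_t entry via the two tables above, then walking the
 * attached overflow array (arrp) for a matching service action. The
 * helper name is hypothetical; the real driver performs this scan in
 * its command dispatch path.
 */
static const struct opcode_info_t *sdeb_find_opcode(const u8 *cdb, u16 sa)
{
	const struct opcode_info_t *oip =
		&opcode_info_arr[opcode_ind_arr[cdb[0]]];
	const struct opcode_info_t *r_oip;
	int k;

	if (!(oip->flags & FF_SA) || oip->sa == sa)
		return oip;		/* preferred (most common) entry */
	for (r_oip = oip->arrp, k = 0; k < oip->num_attached; ++k, ++r_oip) {
		if (r_oip->sa == sa)
			return r_oip;
	}
	return NULL;			/* unsupported service action */
}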

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS;	/* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE;	/* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;	/* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
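
/*
 * Illustrative sketch, not in the original driver: the typical pairing
 * of a sense-buffer helper with check_condition_result that the real
 * resp_*() handlers in this file use. Hypothetical handler.
 */
static int resp_example_unsupported(struct scsi_cmnd *scp,
				    struct sdebug_dev_info *devip)
{
	mk_sense_invalid_opcode(scp);
	return check_condition_result;
}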

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
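
/*
 * Illustrative sketch, not in the original driver: the common shape of
 * a data-in response, building a local array and handing it to
 * fill_from_dev_buffer(). Hypothetical handler and payload.
 */
static int resp_example_data_in(struct scsi_cmnd *scp,
				struct sdebug_dev_info *devip)
{
	unsigned char arr[8];

	memset(arr, 0, sizeof(arr));
	arr[0] = 0x1;	/* hypothetical payload byte */
	return fill_from_dev_buffer(scp, arr, sizeof(arr));
}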

static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}

static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}

static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80;	/* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */

	return sizeof(vpdb0_data);
}

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}

/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1;	/* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	return 0x3c;
}
1556 #define SDEBUG_LONG_INQ_SZ 96
1557 #define SDEBUG_MAX_INQ_ARR_SZ 584
1559 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1561 unsigned char pq_pdt;
1563 unsigned char *cmd = scp->cmnd;
1566 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1568 alloc_len = get_unaligned_be16(cmd + 3);
1569 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1571 return DID_REQUEUE << 16;
1572 is_disk = (sdebug_ptype == TYPE_DISK);
1573 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1574 is_disk_zbc = (is_disk || is_zbc);
1575 have_wlun = scsi_is_wlun(scp->device->lun);
1577 pq_pdt = TYPE_WLUN; /* present, wlun */
1578 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1579 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1581 pq_pdt = (sdebug_ptype & 0x1f);
1583 if (0x2 & cmd[1]) { /* CMDDT bit set */
1584 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1586 return check_condition_result;
1587 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1588 int lu_id_num, port_group_id, target_dev_id;
1591 int host_no = devip->sdbg_host->shost->host_no;
1593 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1594 (devip->channel & 0x7f);
1595 if (sdebug_vpd_use_hostno == 0)
1597 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1598 (devip->target * 1000) + devip->lun);
1599 target_dev_id = ((host_no + 1) * 2000) +
1600 (devip->target * 1000) - 3;
1601 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1602 if (0 == cmd[2]) { /* supported vital product data pages */
1603 arr[1] = cmd[2]; /*sanity */
1605 arr[n++] = 0x0; /* this page */
1606 arr[n++] = 0x80; /* unit serial number */
1607 arr[n++] = 0x83; /* device identification */
1608 arr[n++] = 0x84; /* software interface ident. */
1609 arr[n++] = 0x85; /* management network addresses */
1610 arr[n++] = 0x86; /* extended inquiry */
1611 arr[n++] = 0x87; /* mode page policy */
1612 arr[n++] = 0x88; /* SCSI ports */
1613 if (is_disk_zbc) { /* SBC or ZBC */
1614 arr[n++] = 0x89; /* ATA information */
1615 arr[n++] = 0xb0; /* Block limits */
1616 arr[n++] = 0xb1; /* Block characteristics */
1618 arr[n++] = 0xb2; /* LB Provisioning */
1620 arr[n++] = 0xb6; /* ZB dev. char. */
1622 arr[3] = n - 4; /* number of supported VPD pages */
1623 } else if (0x80 == cmd[2]) { /* unit serial number */
1624 arr[1] = cmd[2]; /*sanity */
1626 memcpy(&arr[4], lu_id_str, len);
1627 } else if (0x83 == cmd[2]) { /* device identification */
1628 arr[1] = cmd[2]; /*sanity */
1629 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1630 target_dev_id, lu_id_num,
1633 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1634 arr[1] = cmd[2]; /*sanity */
1635 arr[3] = inquiry_vpd_84(&arr[4]);
1636 } else if (0x85 == cmd[2]) { /* Management network addresses */
1637 arr[1] = cmd[2]; /*sanity */
1638 arr[3] = inquiry_vpd_85(&arr[4]);
1639 } else if (0x86 == cmd[2]) { /* extended inquiry */
1640 arr[1] = cmd[2]; /*sanity */
1641 arr[3] = 0x3c; /* number of following entries */
1642 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1643 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1644 else if (have_dif_prot)
1645 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1647 arr[4] = 0x0; /* no protection stuff */
1648 arr[5] = 0x7; /* head of q, ordered + simple q's */
1649 } else if (0x87 == cmd[2]) { /* mode page policy */
1650 arr[1] = cmd[2]; /*sanity */
1651 arr[3] = 0x8; /* number of following entries */
1652 arr[4] = 0x2; /* disconnect-reconnect mp */
1653 arr[6] = 0x80; /* mlus, shared */
1654 arr[8] = 0x18; /* protocol specific lu */
1655 arr[10] = 0x82; /* mlus, per initiator port */
1656 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1657 arr[1] = cmd[2]; /*sanity */
1658 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1659 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1660 arr[1] = cmd[2]; /*sanity */
1661 n = inquiry_vpd_89(&arr[4]);
1662 put_unaligned_be16(n, arr + 2);
1663 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1664 arr[1] = cmd[2]; /*sanity */
1665 arr[3] = inquiry_vpd_b0(&arr[4]);
1666 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1667 arr[1] = cmd[2]; /*sanity */
1668 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1669 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1670 arr[1] = cmd[2]; /*sanity */
1671 arr[3] = inquiry_vpd_b2(&arr[4]);
1672 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1673 arr[1] = cmd[2]; /*sanity */
1674 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1676 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1678 return check_condition_result;
1680 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1681 ret = fill_from_dev_buffer(scp, arr,
1682 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1686 /* drops through here for a standard inquiry */
1687 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1688 arr[2] = sdebug_scsi_level;
1689 arr[3] = 2; /* response_data_format==2 */
1690 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1691 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1692 if (sdebug_vpd_use_hostno == 0)
1693 arr[5] |= 0x10; /* claim: implicit TPGS */
1694 arr[6] = 0x10; /* claim: MultiP */
1695 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1696 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1697 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1698 memcpy(&arr[16], sdebug_inq_product_id, 16);
1699 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1700 /* Use Vendor Specific area to place driver date in ASCII */
1701 memcpy(&arr[36], sdebug_version_date, 8);
1702 /* version descriptors (2 bytes each) follow */
1703 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1704 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1706 if (is_disk) { /* SBC-4 no version claimed */
1707 put_unaligned_be16(0x600, arr + n);
1709 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1710 put_unaligned_be16(0x525, arr + n);
1712 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1713 put_unaligned_be16(0x624, arr + n);
1716 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1717 ret = fill_from_dev_buffer(scp, arr,
1718 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
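/*
 * Illustrative sketch, not part of the original driver: the truncation
 * rule used by resp_inquiry() and the other "return parameter data"
 * responders above. The full response is built first, then at most
 * ALLOCATION LENGTH bytes are returned, further capped by the size of
 * the response buffer. The helper name is hypothetical.
 */
static u32 __maybe_unused sdeb_example_resp_len(u32 built_len, u32 alloc_len,
						u32 arr_sz)
{
	/* never return more than was built, was requested, or fits */
	return min_t(u32, min_t(u32, built_len, alloc_len), arr_sz);
}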
1723 /* See resp_iec_m_pg() for how this data is manipulated */
1724 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1727 static int resp_requests(struct scsi_cmnd *scp,
1728 struct sdebug_dev_info *devip)
1730 unsigned char *cmd = scp->cmnd;
1731 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1732 bool dsense = !!(cmd[1] & 1);
1733 u32 alloc_len = cmd[4];
1735 int stopped_state = atomic_read(&devip->stopped);
1737 memset(arr, 0, sizeof(arr));
1738 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1742 arr[2] = LOGICAL_UNIT_NOT_READY;
1743 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1747 arr[2] = NOT_READY; /* NOT_READY in sense_key */
1748 arr[7] = 0xa; /* 18 byte sense buffer */
1749 arr[12] = LOGICAL_UNIT_NOT_READY;
1750 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1752 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1753 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1756 arr[1] = 0x0; /* NO_SENSE in sense_key */
1757 arr[2] = THRESHOLD_EXCEEDED;
1758 arr[3] = 0xff; /* Failure prediction(false) */
1762 arr[2] = 0x0; /* NO_SENSE in sense_key */
1763 arr[7] = 0xa; /* 18 byte sense buffer */
1764 arr[12] = THRESHOLD_EXCEEDED;
1765 arr[13] = 0xff; /* Failure prediction(false) */
1767 } else { /* nothing to report */
1770 memset(arr, 0, len);
1773 memset(arr, 0, len);
1778 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1781 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1783 unsigned char *cmd = scp->cmnd;
1784 int power_cond, want_stop, stopped_state;
1787 power_cond = (cmd[4] & 0xf0) >> 4;
1789 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1790 return check_condition_result;
1792 want_stop = !(cmd[4] & 1);
1793 stopped_state = atomic_read(&devip->stopped);
1794 if (stopped_state == 2) {
1795 ktime_t now_ts = ktime_get_boottime();
1797 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1798 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1800 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1801 /* tur_ms_to_ready delay has expired */
1802 atomic_set(&devip->stopped, 0);
1806 if (stopped_state == 2) {
1808 stopped_state = 1; /* dummy up success */
1809 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1810 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1811 return check_condition_result;
1815 changing = (stopped_state != want_stop);
1817 atomic_xchg(&devip->stopped, want_stop);
1818 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1819 return SDEG_RES_IMMED_MASK;
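/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * the "becoming ready" window tested in resp_start_stop() above ends
 * once the nanoseconds elapsed since device creation reach
 * sdeb_tur_ms_to_ready converted to nanoseconds.
 */
static bool __maybe_unused sdeb_example_is_ready(ktime_t now, ktime_t created,
						 unsigned int tur_ms)
{
	return ktime_to_ns(ktime_sub(now, created)) >= (u64)tur_ms * 1000000;
}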
1824 static sector_t get_sdebug_capacity(void)
1826 static const unsigned int gibibyte = 1073741824;
1828 if (sdebug_virtual_gb > 0)
1829 return (sector_t)sdebug_virtual_gb *
1830 (gibibyte / sdebug_sector_size);
1832 return sdebug_store_sectors;
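/*
 * Illustrative sketch, not from the original source: with the default
 * 512 byte logical block size, get_sdebug_capacity() above turns
 * virtual_gb=1 into 1073741824 / 512 = 2097152 sectors. Hypothetical
 * helper showing the same arithmetic for any power-of-two block size.
 */
static sector_t __maybe_unused sdeb_example_gb_to_sectors(unsigned int gb,
							  unsigned int lb_size)
{
	return (sector_t)gb * ((1U << 30) / lb_size);
}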
1835 #define SDEBUG_READCAP_ARR_SZ 8
1836 static int resp_readcap(struct scsi_cmnd *scp,
1837 struct sdebug_dev_info *devip)
1839 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1842 /* following just in case virtual_gb changed */
1843 sdebug_capacity = get_sdebug_capacity();
1844 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1845 if (sdebug_capacity < 0xffffffff) {
1846 capac = (unsigned int)sdebug_capacity - 1;
1847 put_unaligned_be32(capac, arr + 0);
1849 put_unaligned_be32(0xffffffff, arr + 0);
1850 put_unaligned_be16(sdebug_sector_size, arr + 6);
1851 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1854 #define SDEBUG_READCAP16_ARR_SZ 32
1855 static int resp_readcap16(struct scsi_cmnd *scp,
1856 struct sdebug_dev_info *devip)
1858 unsigned char *cmd = scp->cmnd;
1859 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1862 alloc_len = get_unaligned_be32(cmd + 10);
1863 /* following just in case virtual_gb changed */
1864 sdebug_capacity = get_sdebug_capacity();
1865 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1866 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1867 put_unaligned_be32(sdebug_sector_size, arr + 8);
1868 arr[13] = sdebug_physblk_exp & 0xf;
1869 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1871 if (scsi_debug_lbp()) {
1872 arr[14] |= 0x80; /* LBPME */
1873 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1874 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1875 * in the wider field maps to 0 in this field.
1876 */
1877 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1881 arr[15] = sdebug_lowest_aligned & 0xff;
1883 if (have_dif_prot) {
1884 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1885 arr[12] |= 1; /* PROT_EN */
1888 return fill_from_dev_buffer(scp, arr,
1889 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
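/*
 * Illustrative sketch, not part of the driver: byte 12 of the READ
 * CAPACITY(16) response above packs PROT_EN into bit 0 and P_TYPE into
 * bits 3:1, where P_TYPE values 0/1/2 advertise T10 protection types
 * 1/2/3. Hypothetical helper performing the matching decode.
 */
static unsigned int __maybe_unused sdeb_example_prot_type(unsigned char byte12)
{
	if (!(byte12 & 0x1))		/* PROT_EN clear: no protection */
		return 0;
	return ((byte12 >> 1) & 0x7) + 1;	/* T10 PI type 1..3 */
}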
1892 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1894 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1895 struct sdebug_dev_info *devip)
1897 unsigned char *cmd = scp->cmnd;
1899 int host_no = devip->sdbg_host->shost->host_no;
1900 int port_group_a, port_group_b, port_a, port_b;
1904 alen = get_unaligned_be32(cmd + 6);
1905 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1907 return DID_REQUEUE << 16;
1908 /*
1909 * EVPD page 0x88 states we have two ports, one
1910 * real and a fake port with no device connected.
1911 * So we create two port groups with one port each
1912 * and set the group with port B to unavailable.
1913 */
1914 port_a = 0x1; /* relative port A */
1915 port_b = 0x2; /* relative port B */
1916 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1917 (devip->channel & 0x7f);
1918 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1919 (devip->channel & 0x7f) + 0x80;
1921 /*
1922 * The asymmetric access state is cycled according to the host_id.
1923 */
1925 if (sdebug_vpd_use_hostno == 0) {
1926 arr[n++] = host_no % 3; /* Asymm access state */
1927 arr[n++] = 0x0F; /* claim: all states are supported */
1929 arr[n++] = 0x0; /* Active/Optimized path */
1930 arr[n++] = 0x01; /* only support active/optimized paths */
1932 put_unaligned_be16(port_group_a, arr + n);
1934 arr[n++] = 0; /* Reserved */
1935 arr[n++] = 0; /* Status code */
1936 arr[n++] = 0; /* Vendor unique */
1937 arr[n++] = 0x1; /* One port per group */
1938 arr[n++] = 0; /* Reserved */
1939 arr[n++] = 0; /* Reserved */
1940 put_unaligned_be16(port_a, arr + n);
1942 arr[n++] = 3; /* Port unavailable */
1943 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1944 put_unaligned_be16(port_group_b, arr + n);
1946 arr[n++] = 0; /* Reserved */
1947 arr[n++] = 0; /* Status code */
1948 arr[n++] = 0; /* Vendor unique */
1949 arr[n++] = 0x1; /* One port per group */
1950 arr[n++] = 0; /* Reserved */
1951 arr[n++] = 0; /* Reserved */
1952 put_unaligned_be16(port_b, arr + n);
1956 put_unaligned_be32(rlen, arr + 0);
1958 /*
1959 * Return the smallest value of either
1960 * - The allocated length
1961 * - The constructed command length
1962 * - The maximum array size
1963 */
1964 rlen = min(alen, n);
1965 ret = fill_from_dev_buffer(scp, arr,
1966 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
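/*
 * Illustrative sketch (hypothetical helper, not in the original): the
 * relative target port group identifiers used by resp_report_tgtpgs()
 * above are synthesized from the host number and channel; group B is
 * group A plus 0x80 so the two identifiers can never collide.
 */
static int __maybe_unused sdeb_example_port_group(int host_no, int channel,
						  bool group_b)
{
	int pg = (((host_no + 1) & 0x7f) << 8) + (channel & 0x7f);

	return group_b ? pg + 0x80 : pg;
}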
1971 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1972 struct sdebug_dev_info *devip)
1975 u8 reporting_opts, req_opcode, sdeb_i, supp;
1977 u32 alloc_len, a_len;
1978 int k, offset, len, errsts, count, bump, na;
1979 const struct opcode_info_t *oip;
1980 const struct opcode_info_t *r_oip;
1982 u8 *cmd = scp->cmnd;
1984 rctd = !!(cmd[2] & 0x80);
1985 reporting_opts = cmd[2] & 0x7;
1986 req_opcode = cmd[3];
1987 req_sa = get_unaligned_be16(cmd + 4);
1988 alloc_len = get_unaligned_be32(cmd + 6);
1989 if (alloc_len < 4 || alloc_len > 0xffff) {
1990 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1991 return check_condition_result;
1993 if (alloc_len > 8192)
1997 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1999 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2001 return check_condition_result;
2003 switch (reporting_opts) {
2004 case 0: /* all commands */
2005 /* count number of commands */
2006 for (count = 0, oip = opcode_info_arr;
2007 oip->num_attached != 0xff; ++oip) {
2008 if (F_INV_OP & oip->flags)
2010 count += (oip->num_attached + 1);
2012 bump = rctd ? 20 : 8;
2013 put_unaligned_be32(count * bump, arr);
2014 for (offset = 4, oip = opcode_info_arr;
2015 oip->num_attached != 0xff && offset < a_len; ++oip) {
2016 if (F_INV_OP & oip->flags)
2018 na = oip->num_attached;
2019 arr[offset] = oip->opcode;
2020 put_unaligned_be16(oip->sa, arr + offset + 2);
2022 arr[offset + 5] |= 0x2;
2023 if (FF_SA & oip->flags)
2024 arr[offset + 5] |= 0x1;
2025 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2027 put_unaligned_be16(0xa, arr + offset + 8);
2029 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2030 if (F_INV_OP & oip->flags)
2033 arr[offset] = oip->opcode;
2034 put_unaligned_be16(oip->sa, arr + offset + 2);
2036 arr[offset + 5] |= 0x2;
2037 if (FF_SA & oip->flags)
2038 arr[offset + 5] |= 0x1;
2039 put_unaligned_be16(oip->len_mask[0],
2042 put_unaligned_be16(0xa,
2049 case 1: /* one command: opcode only */
2050 case 2: /* one command: opcode plus service action */
2051 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2052 sdeb_i = opcode_ind_arr[req_opcode];
2053 oip = &opcode_info_arr[sdeb_i];
2054 if (F_INV_OP & oip->flags) {
2058 if (1 == reporting_opts) {
2059 if (FF_SA & oip->flags) {
2060 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2063 return check_condition_result;
2066 } else if (2 == reporting_opts &&
2067 0 == (FF_SA & oip->flags)) {
2068 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2069 kfree(arr);
2070 return check_condition_result;
2072 if (0 == (FF_SA & oip->flags) &&
2073 req_opcode == oip->opcode)
2075 else if (0 == (FF_SA & oip->flags)) {
2076 na = oip->num_attached;
2077 for (k = 0, oip = oip->arrp; k < na;
2079 if (req_opcode == oip->opcode)
2082 supp = (k >= na) ? 1 : 3;
2083 } else if (req_sa != oip->sa) {
2084 na = oip->num_attached;
2085 for (k = 0, oip = oip->arrp; k < na;
2087 if (req_sa == oip->sa)
2090 supp = (k >= na) ? 1 : 3;
2094 u = oip->len_mask[0];
2095 put_unaligned_be16(u, arr + 2);
2096 arr[4] = oip->opcode;
2097 for (k = 1; k < u; ++k)
2098 arr[4 + k] = (k < 16) ?
2099 oip->len_mask[k] : 0xff;
2104 arr[1] = (rctd ? 0x80 : 0) | supp;
2106 put_unaligned_be16(0xa, arr + offset);
2111 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2113 return check_condition_result;
2115 offset = (offset < a_len) ? offset : a_len;
2116 len = (offset < alloc_len) ? offset : alloc_len;
2117 errsts = fill_from_dev_buffer(scp, arr, len);
2122 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2123 struct sdebug_dev_info *devip)
2128 u8 *cmd = scp->cmnd;
2130 memset(arr, 0, sizeof(arr));
2131 repd = !!(cmd[2] & 0x80);
2132 alloc_len = get_unaligned_be32(cmd + 6);
2133 if (alloc_len < 4) {
2134 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2135 return check_condition_result;
2137 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2138 arr[1] = 0x1; /* ITNRS */
2145 len = (len < alloc_len) ? len : alloc_len;
2146 return fill_from_dev_buffer(scp, arr, len);
2149 /* <<Following mode page info copied from ST318451LW>> */
2151 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2152 { /* Read-Write Error Recovery page for mode_sense */
2153 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2156 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2158 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2159 return sizeof(err_recov_pg);
2162 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2163 { /* Disconnect-Reconnect page for mode_sense */
2164 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2165 0, 0, 0, 0, 0, 0, 0, 0};
2167 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2169 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2170 return sizeof(disconnect_pg);
2173 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2174 { /* Format device page for mode_sense */
2175 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2176 0, 0, 0, 0, 0, 0, 0, 0,
2177 0, 0, 0, 0, 0x40, 0, 0, 0};
2179 memcpy(p, format_pg, sizeof(format_pg));
2180 put_unaligned_be16(sdebug_sectors_per, p + 10);
2181 put_unaligned_be16(sdebug_sector_size, p + 12);
2182 if (sdebug_removable)
2183 p[20] |= 0x20; /* should agree with INQUIRY */
2185 memset(p + 2, 0, sizeof(format_pg) - 2);
2186 return sizeof(format_pg);
2189 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2190 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2193 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2194 { /* Caching page for mode_sense */
2195 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2196 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2197 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2198 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2200 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2201 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2202 memcpy(p, caching_pg, sizeof(caching_pg));
2204 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2205 else if (2 == pcontrol)
2206 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2207 return sizeof(caching_pg);
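/*
 * Illustrative sketch, not from the original file: the mode page
 * responders above share one PCONTROL convention, shown here with
 * hypothetical names. 0 returns current values, 1 a changeable-bits
 * mask, 2 the defaults; 3 (saved values) is rejected earlier in
 * resp_mode_sense() with SAVING_PARAMS_UNSUP.
 */
enum sdeb_example_pcontrol {
	SDEB_EX_PC_CURRENT	= 0,
	SDEB_EX_PC_CHANGEABLE	= 1,
	SDEB_EX_PC_DEFAULT	= 2,
	SDEB_EX_PC_SAVED	= 3,	/* not supported by this driver */
};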
2210 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2213 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2214 { /* Control mode page for mode_sense */
2215 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2217 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2221 ctrl_m_pg[2] |= 0x4;
2223 ctrl_m_pg[2] &= ~0x4;
2226 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2228 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2230 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2231 else if (2 == pcontrol)
2232 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2233 return sizeof(ctrl_m_pg);
2237 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2238 { /* Informational Exceptions control mode page for mode_sense */
2239 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2241 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2244 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2246 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2247 else if (2 == pcontrol)
2248 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2249 return sizeof(iec_m_pg);
2252 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2253 { /* SAS SSP mode page - short format for mode_sense */
2254 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2255 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2257 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2259 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2260 return sizeof(sas_sf_m_pg);
2264 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2266 { /* SAS phy control and discover mode page for mode_sense */
2267 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2268 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2269 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2270 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2271 0x2, 0, 0, 0, 0, 0, 0, 0,
2272 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2273 0, 0, 0, 0, 0, 0, 0, 0,
2274 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2275 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2276 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2277 0x3, 0, 0, 0, 0, 0, 0, 0,
2278 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2279 0, 0, 0, 0, 0, 0, 0, 0,
2283 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2284 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2285 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2286 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2287 port_a = target_dev_id + 1;
2288 port_b = port_a + 1;
2289 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2290 put_unaligned_be32(port_a, p + 20);
2291 put_unaligned_be32(port_b, p + 48 + 20);
2293 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2294 return sizeof(sas_pcd_m_pg);
2297 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2298 { /* SAS SSP shared protocol specific port mode subpage */
2299 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2300 0, 0, 0, 0, 0, 0, 0, 0,
2303 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2305 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2306 return sizeof(sas_sha_m_pg);
2309 #define SDEBUG_MAX_MSENSE_SZ 256
2311 static int resp_mode_sense(struct scsi_cmnd *scp,
2312 struct sdebug_dev_info *devip)
2314 int pcontrol, pcode, subpcode, bd_len;
2315 unsigned char dev_spec;
2316 u32 alloc_len, offset, len;
2318 int target = scp->device->id;
2320 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2321 unsigned char *cmd = scp->cmnd;
2322 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2324 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2325 pcontrol = (cmd[2] & 0xc0) >> 6;
2326 pcode = cmd[2] & 0x3f;
2328 msense_6 = (MODE_SENSE == cmd[0]);
2329 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2330 is_disk = (sdebug_ptype == TYPE_DISK);
2331 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2332 if ((is_disk || is_zbc) && !dbd)
2333 bd_len = llbaa ? 16 : 8;
2336 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2337 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2338 if (0x3 == pcontrol) { /* Saving values not supported */
2339 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2340 return check_condition_result;
2342 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2343 (devip->target * 1000) - 3;
2344 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2345 if (is_disk || is_zbc) {
2346 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2358 arr[4] = 0x1; /* set LONGLBA bit */
2359 arr[7] = bd_len; /* assume 255 or less */
2363 if ((bd_len > 0) && (!sdebug_capacity))
2364 sdebug_capacity = get_sdebug_capacity();
2367 if (sdebug_capacity > 0xfffffffe)
2368 put_unaligned_be32(0xffffffff, ap + 0);
2370 put_unaligned_be32(sdebug_capacity, ap + 0);
2371 put_unaligned_be16(sdebug_sector_size, ap + 6);
2374 } else if (16 == bd_len) {
2375 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2376 put_unaligned_be32(sdebug_sector_size, ap + 12);
2381 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2382 /* TODO: Control Extension page */
2383 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2384 return check_condition_result;
2389 case 0x1: /* Read-Write error recovery page, direct access */
2390 len = resp_err_recov_pg(ap, pcontrol, target);
2393 case 0x2: /* Disconnect-Reconnect page, all devices */
2394 len = resp_disconnect_pg(ap, pcontrol, target);
2397 case 0x3: /* Format device page, direct access */
2399 len = resp_format_pg(ap, pcontrol, target);
2404 case 0x8: /* Caching page, direct access */
2405 if (is_disk || is_zbc) {
2406 len = resp_caching_pg(ap, pcontrol, target);
2411 case 0xa: /* Control Mode page, all devices */
2412 len = resp_ctrl_m_pg(ap, pcontrol, target);
2415 case 0x19: /* if spc==1 then sas phy, control+discover */
2416 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2417 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2418 return check_condition_result;
2421 if ((0x0 == subpcode) || (0xff == subpcode))
2422 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2423 if ((0x1 == subpcode) || (0xff == subpcode))
2424 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2426 if ((0x2 == subpcode) || (0xff == subpcode))
2427 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2430 case 0x1c: /* Informational Exceptions Mode page, all devices */
2431 len = resp_iec_m_pg(ap, pcontrol, target);
2434 case 0x3f: /* Read all Mode pages */
2435 if ((0 == subpcode) || (0xff == subpcode)) {
2436 len = resp_err_recov_pg(ap, pcontrol, target);
2437 len += resp_disconnect_pg(ap + len, pcontrol, target);
2439 len += resp_format_pg(ap + len, pcontrol,
2441 len += resp_caching_pg(ap + len, pcontrol,
2443 } else if (is_zbc) {
2444 len += resp_caching_pg(ap + len, pcontrol,
2447 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2448 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2449 if (0xff == subpcode) {
2450 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2451 target, target_dev_id);
2452 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2454 len += resp_iec_m_pg(ap + len, pcontrol, target);
2457 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2458 return check_condition_result;
2466 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2467 return check_condition_result;
2470 arr[0] = offset - 1;
2472 put_unaligned_be16((offset - 2), arr + 0);
2473 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2476 #define SDEBUG_MAX_MSELECT_SZ 512
2478 static int resp_mode_select(struct scsi_cmnd *scp,
2479 struct sdebug_dev_info *devip)
2481 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2482 int param_len, res, mpage;
2483 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2484 unsigned char *cmd = scp->cmnd;
2485 int mselect6 = (MODE_SELECT == cmd[0]);
2487 memset(arr, 0, sizeof(arr));
2490 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2491 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2492 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2493 return check_condition_result;
2495 res = fetch_to_dev_buffer(scp, arr, param_len);
2497 return DID_ERROR << 16;
2498 else if (sdebug_verbose && (res < param_len))
2499 sdev_printk(KERN_INFO, scp->device,
2500 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2501 __func__, param_len, res);
2502 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2503 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2504 off = bd_len + (mselect6 ? 4 : 8);
2505 if (md_len > 2 || off >= res) {
2506 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2507 return check_condition_result;
2509 mpage = arr[off] & 0x3f;
2510 ps = !!(arr[off] & 0x80);
2512 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2513 return check_condition_result;
2515 spf = !!(arr[off] & 0x40);
2516 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2518 if ((pg_len + off) > param_len) {
2519 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2520 PARAMETER_LIST_LENGTH_ERR, 0);
2521 return check_condition_result;
2524 case 0x8: /* Caching Mode page */
2525 if (caching_pg[1] == arr[off + 1]) {
2526 memcpy(caching_pg + 2, arr + off + 2,
2527 sizeof(caching_pg) - 2);
2528 goto set_mode_changed_ua;
2531 case 0xa: /* Control Mode page */
2532 if (ctrl_m_pg[1] == arr[off + 1]) {
2533 memcpy(ctrl_m_pg + 2, arr + off + 2,
2534 sizeof(ctrl_m_pg) - 2);
2535 if (ctrl_m_pg[4] & 0x8)
2539 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2540 goto set_mode_changed_ua;
2543 case 0x1c: /* Informational Exceptions Mode page */
2544 if (iec_m_pg[1] == arr[off + 1]) {
2545 memcpy(iec_m_pg + 2, arr + off + 2,
2546 sizeof(iec_m_pg) - 2);
2547 goto set_mode_changed_ua;
2553 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2554 return check_condition_result;
2555 set_mode_changed_ua:
2556 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2560 static int resp_temp_l_pg(unsigned char *arr)
2562 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2563 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2566 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2567 return sizeof(temp_l_pg);
2570 static int resp_ie_l_pg(unsigned char *arr)
2572 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2575 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2576 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2577 arr[4] = THRESHOLD_EXCEEDED;
2580 return sizeof(ie_l_pg);
2583 #define SDEBUG_MAX_LSENSE_SZ 512
2585 static int resp_log_sense(struct scsi_cmnd *scp,
2586 struct sdebug_dev_info *devip)
2588 int ppc, sp, pcode, subpcode;
2589 u32 alloc_len, len, n;
2590 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2591 unsigned char *cmd = scp->cmnd;
2593 memset(arr, 0, sizeof(arr));
2597 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2598 return check_condition_result;
2600 pcode = cmd[2] & 0x3f;
2601 subpcode = cmd[3] & 0xff;
2602 alloc_len = get_unaligned_be16(cmd + 7);
2604 if (0 == subpcode) {
2606 case 0x0: /* Supported log pages log page */
2608 arr[n++] = 0x0; /* this page */
2609 arr[n++] = 0xd; /* Temperature */
2610 arr[n++] = 0x2f; /* Informational exceptions */
2613 case 0xd: /* Temperature log page */
2614 arr[3] = resp_temp_l_pg(arr + 4);
2616 case 0x2f: /* Informational exceptions log page */
2617 arr[3] = resp_ie_l_pg(arr + 4);
2620 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2621 return check_condition_result;
2623 } else if (0xff == subpcode) {
2627 case 0x0: /* Supported log pages and subpages log page */
2630 arr[n++] = 0x0; /* 0,0 page */
2632 arr[n++] = 0xff; /* this page */
2634 arr[n++] = 0x0; /* Temperature */
2636 arr[n++] = 0x0; /* Informational exceptions */
2639 case 0xd: /* Temperature subpages */
2642 arr[n++] = 0x0; /* Temperature */
2645 case 0x2f: /* Informational exceptions subpages */
2648 arr[n++] = 0x0; /* Informational exceptions */
2652 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2653 return check_condition_result;
2656 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2657 return check_condition_result;
2659 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2660 return fill_from_dev_buffer(scp, arr,
2661 min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2664 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2666 return devip->nr_zones != 0;
2669 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2670 unsigned long long lba)
2672 return &devip->zstate[lba >> devip->zsize_shift];
2675 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2677 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2680 static void zbc_close_zone(struct sdebug_dev_info *devip,
2681 struct sdeb_zone_state *zsp)
2683 enum sdebug_z_cond zc;
2685 if (zbc_zone_is_conv(zsp))
2689 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2692 if (zc == ZC2_IMPLICIT_OPEN)
2693 devip->nr_imp_open--;
2695 devip->nr_exp_open--;
2697 if (zsp->z_wp == zsp->z_start) {
2698 zsp->z_cond = ZC1_EMPTY;
2700 zsp->z_cond = ZC4_CLOSED;
2705 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2707 struct sdeb_zone_state *zsp = &devip->zstate[0];
2710 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2711 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2712 zbc_close_zone(devip, zsp);
2718 static void zbc_open_zone(struct sdebug_dev_info *devip,
2719 struct sdeb_zone_state *zsp, bool explicit)
2721 enum sdebug_z_cond zc;
2723 if (zbc_zone_is_conv(zsp))
2727 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2728 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2731 /* Close an implicit open zone if necessary */
2732 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2733 zbc_close_zone(devip, zsp);
2734 else if (devip->max_open &&
2735 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2736 zbc_close_imp_open_zone(devip);
2738 if (zsp->z_cond == ZC4_CLOSED)
2741 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2742 devip->nr_exp_open++;
2744 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2745 devip->nr_imp_open++;
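/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * zbc_close_zone() above picks the condition a write-pointer zone falls
 * back to when closed; a zone that was never written returns to EMPTY.
 */
static enum sdebug_z_cond __maybe_unused
sdeb_example_cond_after_close(const struct sdeb_zone_state *zsp)
{
	return (zsp->z_wp == zsp->z_start) ? ZC1_EMPTY : ZC4_CLOSED;
}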
2749 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2750 struct sdeb_zone_state *zsp)
2752 switch (zsp->z_cond) {
2753 case ZC2_IMPLICIT_OPEN:
2754 devip->nr_imp_open--;
2756 case ZC3_EXPLICIT_OPEN:
2757 devip->nr_exp_open--;
2760 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2761 zsp->z_start, zsp->z_cond);
2764 zsp->z_cond = ZC5_FULL;
2767 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2768 unsigned long long lba, unsigned int num)
2770 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2771 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2773 if (zbc_zone_is_conv(zsp))
2776 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2778 if (zsp->z_wp >= zend)
2779 zbc_set_zone_full(devip, zsp);
2784 if (lba != zsp->z_wp)
2785 zsp->z_non_seq_resource = true;
2791 } else if (end > zsp->z_wp) {
2797 if (zsp->z_wp >= zend)
2798 zbc_set_zone_full(devip, zsp);
2804 zend = zsp->z_start + zsp->z_size;
2809 static int check_zbc_access_params(struct scsi_cmnd *scp,
2810 unsigned long long lba, unsigned int num, bool write)
2812 struct scsi_device *sdp = scp->device;
2813 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2814 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2815 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2818 if (devip->zmodel == BLK_ZONED_HA)
2820 /* For host-managed, reads cannot cross zone type boundaries */
2821 if (zsp_end != zsp &&
2822 zbc_zone_is_conv(zsp) &&
2823 !zbc_zone_is_conv(zsp_end)) {
2824 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2827 return check_condition_result;
2832 /* No restrictions for writes within conventional zones */
2833 if (zbc_zone_is_conv(zsp)) {
2834 if (!zbc_zone_is_conv(zsp_end)) {
2835 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2837 WRITE_BOUNDARY_ASCQ);
2838 return check_condition_result;
2843 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2844 /* Writes cannot cross sequential zone boundaries */
2845 if (zsp_end != zsp) {
2846 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2848 WRITE_BOUNDARY_ASCQ);
2849 return check_condition_result;
2851 /* Cannot write full zones */
2852 if (zsp->z_cond == ZC5_FULL) {
2853 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2854 INVALID_FIELD_IN_CDB, 0);
2855 return check_condition_result;
2857 /* Writes must be aligned to the zone WP */
2858 if (lba != zsp->z_wp) {
2859 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2861 UNALIGNED_WRITE_ASCQ);
2862 return check_condition_result;
2866 /* Handle implicit open of closed and empty zones */
2867 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2868 if (devip->max_open &&
2869 devip->nr_exp_open >= devip->max_open) {
2870 mk_sense_buffer(scp, DATA_PROTECT,
2873 return check_condition_result;
2875 zbc_open_zone(devip, zsp, false);
2881 static inline int check_device_access_params
2882 (struct scsi_cmnd *scp, unsigned long long lba,
2883 unsigned int num, bool write)
2885 struct scsi_device *sdp = scp->device;
2886 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2888 if (lba + num > sdebug_capacity) {
2889 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2890 return check_condition_result;
2892 /* transfer length excessive (tie in to block limits VPD page) */
2893 if (num > sdebug_store_sectors) {
2894 /* needs work to find which cdb byte 'num' comes from */
2895 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2896 return check_condition_result;
2898 if (write && unlikely(sdebug_wp)) {
2899 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2900 return check_condition_result;
2902 if (sdebug_dev_is_zoned(devip))
2903 return check_zbc_access_params(scp, lba, num, write);
2908 /*
2909 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2910 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2911 * that access any of the "stores" in struct sdeb_store_info should call this
2912 * function with bug_if_fake_rw set to true.
2913 */
2914 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2915 bool bug_if_fake_rw)
2917 if (sdebug_fake_rw) {
2918 BUG_ON(bug_if_fake_rw); /* See note above */
2921 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2924 /* Returns number of bytes copied or -1 if error. */
2925 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2926 u32 sg_skip, u64 lba, u32 num, bool do_write)
2929 u64 block, rest = 0;
2930 enum dma_data_direction dir;
2931 struct scsi_data_buffer *sdb = &scp->sdb;
2935 dir = DMA_TO_DEVICE;
2936 write_since_sync = true;
2938 dir = DMA_FROM_DEVICE;
2941 if (!sdb->length || !sip)
2943 if (scp->sc_data_direction != dir)
2947 block = do_div(lba, sdebug_store_sectors);
2948 if (block + num > sdebug_store_sectors)
2949 rest = block + num - sdebug_store_sectors;
2951 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2952 fsp + (block * sdebug_sector_size),
2953 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2954 if (ret != (num - rest) * sdebug_sector_size)
2958 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2959 fsp, rest * sdebug_sector_size,
2960 sg_skip + ((num - rest) * sdebug_sector_size),
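/*
 * Illustrative sketch, not in the original: do_device_access() above
 * treats the store as a ring of sdebug_store_sectors blocks, so an
 * access that starts near the end is split into a tail copy and a
 * wrapped remainder ("rest"). Hypothetical helper computing the split.
 */
static u32 __maybe_unused sdeb_example_wrap_rest(u64 block, u32 num,
						 u64 store_blks)
{
	return (block + num > store_blks) ? (u32)(block + num - store_blks)
					  : 0;
}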
2967 /* Returns number of bytes copied or -1 if error. */
2968 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2970 struct scsi_data_buffer *sdb = &scp->sdb;
2974 if (scp->sc_data_direction != DMA_TO_DEVICE)
2976 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2977 num * sdebug_sector_size, 0, true);
2980 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2981 * arr into sip->storep+lba and return true. If comparison fails then
2982 * return false. */
2983 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2984 const u8 *arr, bool compare_only)
2987 u64 block, rest = 0;
2988 u32 store_blks = sdebug_store_sectors;
2989 u32 lb_size = sdebug_sector_size;
2990 u8 *fsp = sip->storep;
2992 block = do_div(lba, store_blks);
2993 if (block + num > store_blks)
2994 rest = block + num - store_blks;
2996 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3000 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3006 arr += num * lb_size;
3007 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3009 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
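/*
 * Illustrative sketch, not part of the driver: COMPARE AND WRITE
 * transfers 2 * num blocks; the bottom half is compared with the
 * medium and only on a match is the top half written. A minimal flat
 * (non-wrapping) version of comp_write_worker() above, with a
 * hypothetical name.
 */
static bool __maybe_unused sdeb_example_caw(u8 *store, const u8 *arr,
					    u32 num, u32 lb_size)
{
	if (memcmp(store, arr, (size_t)num * lb_size))
		return false;			/* miscompare */
	memcpy(store, arr + (size_t)num * lb_size, (size_t)num * lb_size);
	return true;
}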
3013 static __be16 dif_compute_csum(const void *buf, int len)
3018 csum = (__force __be16)ip_compute_csum(buf, len);
3020 csum = cpu_to_be16(crc_t10dif(buf, len));
3025 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3026 sector_t sector, u32 ei_lba)
3028 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3030 if (sdt->guard_tag != csum) {
3031 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3032 (unsigned long)sector,
3033 be16_to_cpu(sdt->guard_tag),
3037 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3038 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3039 pr_err("REF check failed on sector %lu\n",
3040 (unsigned long)sector);
3043 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3044 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3045 pr_err("REF check failed on sector %lu\n",
3046 (unsigned long)sector);
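/*
 * Illustrative sketch (not from the original source): the layout of the
 * 8 byte T10 PI tuple that dif_verify() above checks per logical block.
 * The guard tag is a CRC-T10DIF (or IP checksum, when the driver's
 * guard parameter selects it) of the block data; the reference tag
 * holds the low LBA bits for type 1 protection and the expected
 * initial LBA for type 2.
 */
struct sdeb_example_pi_tuple {
	__be16 guard_tag;	/* checksum over one logical block */
	__be16 app_tag;		/* 0xffff means "do not check" */
	__be32 ref_tag;		/* meaning depends on protection type */
};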
3052 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3053 unsigned int sectors, bool read)
3057 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3058 scp->device->hostdata, true);
3059 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3060 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3061 struct sg_mapping_iter miter;
3063 /* Bytes of protection data to copy into sgl */
3064 resid = sectors * sizeof(*dif_storep);
3066 sg_miter_start(&miter, scsi_prot_sglist(scp),
3067 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3068 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3070 while (sg_miter_next(&miter) && resid > 0) {
3071 size_t len = min_t(size_t, miter.length, resid);
3072 void *start = dif_store(sip, sector);
3075 if (dif_store_end < start + len)
3076 rest = start + len - dif_store_end;
3081 memcpy(paddr, start, len - rest);
3083 memcpy(start, paddr, len - rest);
3087 memcpy(paddr + len - rest, dif_storep, rest);
3089 memcpy(dif_storep, paddr + len - rest, rest);
3092 sector += len / sizeof(*dif_storep);
3095 sg_miter_stop(&miter);
3098 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3099 unsigned int sectors, u32 ei_lba)
3103 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3104 scp->device->hostdata, true);
3105 struct t10_pi_tuple *sdt;
3107 for (i = 0; i < sectors; i++, ei_lba++) {
3110 sector = start_sec + i;
3111 sdt = dif_store(sip, sector);
3113 if (sdt->app_tag == cpu_to_be16(0xffff))
3116 ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3124 dif_copy_prot(scp, start_sec, sectors, true);
3130 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3137 struct sdeb_store_info *sip = devip2sip(devip, true);
3138 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3139 u8 *cmd = scp->cmnd;
3144 lba = get_unaligned_be64(cmd + 2);
3145 num = get_unaligned_be32(cmd + 10);
3150 lba = get_unaligned_be32(cmd + 2);
3151 num = get_unaligned_be16(cmd + 7);
3156 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3157 (u32)(cmd[1] & 0x1f) << 16;
3158 num = (0 == cmd[4]) ? 256 : cmd[4];
3163 lba = get_unaligned_be32(cmd + 2);
3164 num = get_unaligned_be32(cmd + 6);
3167 case XDWRITEREAD_10:
3169 lba = get_unaligned_be32(cmd + 2);
3170 num = get_unaligned_be16(cmd + 7);
3173 default: /* assume READ(32) */
3174 lba = get_unaligned_be64(cmd + 12);
3175 ei_lba = get_unaligned_be32(cmd + 20);
3176 num = get_unaligned_be32(cmd + 28);
3180 if (unlikely(have_dif_prot && check_prot)) {
3181 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3183 mk_sense_invalid_opcode(scp);
3184 return check_condition_result;
3186 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3187 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3188 (cmd[1] & 0xe0) == 0)
3189 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3192 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3193 atomic_read(&sdeb_inject_pending))) {
3195 atomic_set(&sdeb_inject_pending, 0);
3198 ret = check_device_access_params(scp, lba, num, false);
3201 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3202 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3203 ((lba + num) > sdebug_medium_error_start))) {
3204 /* claim unrecoverable read error */
3205 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3206 /* set info field and valid bit for fixed descriptor */
3207 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3208 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3209 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3210 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3211 put_unaligned_be32(ret, scp->sense_buffer + 3);
3213 scsi_set_resid(scp, scsi_bufflen(scp));
3214 return check_condition_result;
3217 read_lock(macc_lckp);
3220 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3221 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3224 read_unlock(macc_lckp);
3225 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3226 return illegal_condition_result;
3230 ret = do_device_access(sip, scp, 0, lba, num, false);
3231 read_unlock(macc_lckp);
3232 if (unlikely(ret == -1))
3233 return DID_ERROR << 16;
3235 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3237 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3238 atomic_read(&sdeb_inject_pending))) {
3239 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3240 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3241 atomic_set(&sdeb_inject_pending, 0);
3242 return check_condition_result;
3243 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3244 /* Logical block guard check failed */
3245 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3246 atomic_set(&sdeb_inject_pending, 0);
3247 return illegal_condition_result;
3248 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3249 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3250 atomic_set(&sdeb_inject_pending, 0);
3251 return illegal_condition_result;
3257 static void dump_sector(unsigned char *buf, int len)
3261 pr_err(">>> Sector Dump <<<\n");
3262 for (i = 0 ; i < len ; i += 16) {
3265 for (j = 0, n = 0; j < 16; j++) {
3266 unsigned char c = buf[i+j];
3268 if (c >= 0x20 && c < 0x7e)
3269 n += scnprintf(b + n, sizeof(b) - n,
3272 n += scnprintf(b + n, sizeof(b) - n,
3275 pr_err("%04d: %s\n", i, b);
3279 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3280 unsigned int sectors, u32 ei_lba)
3283 struct t10_pi_tuple *sdt;
3285 sector_t sector = start_sec;
3288 struct sg_mapping_iter diter;
3289 struct sg_mapping_iter piter;
3291 BUG_ON(scsi_sg_count(SCpnt) == 0);
3292 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3294 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3295 scsi_prot_sg_count(SCpnt),
3296 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3297 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3298 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3300 /* For each protection page */
3301 while (sg_miter_next(&piter)) {
3303 if (WARN_ON(!sg_miter_next(&diter))) {
3308 for (ppage_offset = 0; ppage_offset < piter.length;
3309 ppage_offset += sizeof(struct t10_pi_tuple)) {
3310 /* If we're at the end of the current
3311 * data page, advance to the next one.
3312 */
3313 if (dpage_offset >= diter.length) {
3314 if (WARN_ON(!sg_miter_next(&diter))) {
3321 sdt = piter.addr + ppage_offset;
3322 daddr = diter.addr + dpage_offset;
3324 ret = dif_verify(sdt, daddr, sector, ei_lba);
3326 dump_sector(daddr, sdebug_sector_size);
3332 dpage_offset += sdebug_sector_size;
3334 diter.consumed = dpage_offset;
3335 sg_miter_stop(&diter);
3337 sg_miter_stop(&piter);
3339 dif_copy_prot(SCpnt, start_sec, sectors, false);
3346 sg_miter_stop(&diter);
3347 sg_miter_stop(&piter);
3351 static unsigned long lba_to_map_index(sector_t lba)
3353 if (sdebug_unmap_alignment)
3354 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3355 sector_div(lba, sdebug_unmap_granularity);
3359 static sector_t map_index_to_lba(unsigned long index)
3361 sector_t lba = index * sdebug_unmap_granularity;
3363 if (sdebug_unmap_alignment)
3364 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
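/*
 * Illustrative sketch, not in the original: with
 * sdebug_unmap_granularity=4 and sdebug_unmap_alignment=1 the two
 * helpers above map LBA 0 to index 0, LBAs 1-4 to index 1 and LBAs
 * 5-8 to index 2; map_index_to_lba() inverts this on granule starts
 * (index 1 -> LBA 1, index 2 -> LBA 5). Hypothetical standalone copy
 * of the forward mapping.
 */
static unsigned long __maybe_unused
sdeb_example_map_index(sector_t lba, unsigned int granularity,
		       unsigned int alignment)
{
	if (alignment)
		lba += granularity - alignment;
	sector_div(lba, granularity);
	return lba;
}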
3368 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3372 unsigned int mapped;
3373 unsigned long index;
3376 index = lba_to_map_index(lba);
3377 mapped = test_bit(index, sip->map_storep);
3380 next = find_next_zero_bit(sip->map_storep, map_size, index);
3382 next = find_next_bit(sip->map_storep, map_size, index);
3384 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3389 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3392 sector_t end = lba + len;
3395 unsigned long index = lba_to_map_index(lba);
3397 if (index < map_size)
3398 set_bit(index, sip->map_storep);
3400 lba = map_index_to_lba(index + 1);
3404 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3407 sector_t end = lba + len;
3408 u8 *fsp = sip->storep;
3411 unsigned long index = lba_to_map_index(lba);
3413 if (lba == map_index_to_lba(index) &&
3414 lba + sdebug_unmap_granularity <= end &&
3416 clear_bit(index, sip->map_storep);
3417 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3418 memset(fsp + lba * sdebug_sector_size,
3419 (sdebug_lbprz & 1) ? 0 : 0xff,
3420 sdebug_sector_size *
3421 sdebug_unmap_granularity);
3423 if (sip->dif_storep) {
3424 memset(sip->dif_storep + lba, 0xff,
3425 sizeof(*sip->dif_storep) *
3426 sdebug_unmap_granularity);
3429 lba = map_index_to_lba(index + 1);
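/*
 * Illustrative sketch (hypothetical helper, not in the file): what an
 * unmapped block reads back as depends on the lbprz setting applied in
 * unmap_region() above. An odd value zero-fills, an even non-zero
 * value fills with 0xff, and zero leaves the store untouched.
 */
static int __maybe_unused sdeb_example_unmap_fill(int lbprz)
{
	if (!lbprz)
		return -1;	/* no overwrite: reads return stale data */
	return (lbprz & 1) ? 0x00 : 0xff;
}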
3433 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3440 struct sdeb_store_info *sip = devip2sip(devip, true);
3441 rwlock_t *macc_lckp = &sip->macc_lck;
3442 u8 *cmd = scp->cmnd;
3447 lba = get_unaligned_be64(cmd + 2);
3448 num = get_unaligned_be32(cmd + 10);
3453 lba = get_unaligned_be32(cmd + 2);
3454 num = get_unaligned_be16(cmd + 7);
3459 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3460 (u32)(cmd[1] & 0x1f) << 16;
3461 num = (0 == cmd[4]) ? 256 : cmd[4];
3466 lba = get_unaligned_be32(cmd + 2);
3467 num = get_unaligned_be32(cmd + 6);
3470 case 0x53: /* XDWRITEREAD(10) */
3472 lba = get_unaligned_be32(cmd + 2);
3473 num = get_unaligned_be16(cmd + 7);
3476 default: /* assume WRITE(32) */
3477 lba = get_unaligned_be64(cmd + 12);
3478 ei_lba = get_unaligned_be32(cmd + 20);
3479 num = get_unaligned_be32(cmd + 28);
3483 if (unlikely(have_dif_prot && check_prot)) {
3484 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3486 mk_sense_invalid_opcode(scp);
3487 return check_condition_result;
3489 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3490 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3491 (cmd[1] & 0xe0) == 0)
3492 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3496 write_lock(macc_lckp);
3497 ret = check_device_access_params(scp, lba, num, true);
3499 write_unlock(macc_lckp);
3504 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3505 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3508 write_unlock(macc_lckp);
3509 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3510 return illegal_condition_result;
3514 ret = do_device_access(sip, scp, 0, lba, num, true);
3515 if (unlikely(scsi_debug_lbp()))
3516 map_region(sip, lba, num);
3517 /* If ZBC zone then bump its write pointer */
3518 if (sdebug_dev_is_zoned(devip))
3519 zbc_inc_wp(devip, lba, num);
3520 write_unlock(macc_lckp);
3521 if (unlikely(-1 == ret))
3522 return DID_ERROR << 16;
3523 else if (unlikely(sdebug_verbose &&
3524 (ret < (num * sdebug_sector_size))))
3525 sdev_printk(KERN_INFO, scp->device,
3526 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3527 my_name, num * sdebug_sector_size, ret);
3529 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3530 atomic_read(&sdeb_inject_pending))) {
3531 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3532 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3533 atomic_set(&sdeb_inject_pending, 0);
3534 return check_condition_result;
3535 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3536 /* Logical block guard check failed */
3537 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3538 atomic_set(&sdeb_inject_pending, 0);
3539 return illegal_condition_result;
3540 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3541 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3542 atomic_set(&sdeb_inject_pending, 0);
3543 return illegal_condition_result;
3549 /*
3550 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3551 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3552 */
3553 static int resp_write_scat(struct scsi_cmnd *scp,
3554 struct sdebug_dev_info *devip)
3556 u8 *cmd = scp->cmnd;
3559 struct sdeb_store_info *sip = devip2sip(devip, true);
3560 rwlock_t *macc_lckp = &sip->macc_lck;
3562 u16 lbdof, num_lrd, k;
3563 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3564 u32 lb_size = sdebug_sector_size;
3569 static const u32 lrd_size = 32; /* + parameter list header size */
3571 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3573 wrprotect = (cmd[10] >> 5) & 0x7;
3574 lbdof = get_unaligned_be16(cmd + 12);
3575 num_lrd = get_unaligned_be16(cmd + 16);
3576 bt_len = get_unaligned_be32(cmd + 28);
3577 } else { /* that leaves WRITE SCATTERED(16) */
3579 wrprotect = (cmd[2] >> 5) & 0x7;
3580 lbdof = get_unaligned_be16(cmd + 4);
3581 num_lrd = get_unaligned_be16(cmd + 8);
3582 bt_len = get_unaligned_be32(cmd + 10);
3583 if (unlikely(have_dif_prot)) {
3584 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3586 mk_sense_invalid_opcode(scp);
3587 return illegal_condition_result;
3589 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3590 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3592 sdev_printk(KERN_ERR, scp->device,
3593 "Unprotected WR to DIF device\n");
3596 if ((num_lrd == 0) || (bt_len == 0))
3597 return 0; /* T10 says these do-nothings are not errors */
3600 sdev_printk(KERN_INFO, scp->device,
3601 "%s: %s: LB Data Offset field bad\n",
3603 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3604 return illegal_condition_result;
3606 lbdof_blen = lbdof * lb_size;
3607 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3609 sdev_printk(KERN_INFO, scp->device,
3610 "%s: %s: LBA range descriptors don't fit\n",
3612 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3613 return illegal_condition_result;
3615 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3617 return SCSI_MLQUEUE_HOST_BUSY;
3619 sdev_printk(KERN_INFO, scp->device,
3620 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3621 my_name, __func__, lbdof_blen);
3622 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3624 ret = DID_ERROR << 16;
3628 write_lock(macc_lckp);
3629 sg_off = lbdof_blen;
3630 /* Spec says Buffer xfer Length field in number of LBs in dout */
3632 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3633 lba = get_unaligned_be64(up + 0);
3634 num = get_unaligned_be32(up + 8);
3636 sdev_printk(KERN_INFO, scp->device,
3637 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3638 my_name, __func__, k, lba, num, sg_off);
3641 ret = check_device_access_params(scp, lba, num, true);
3643 goto err_out_unlock;
3644 num_by = num * lb_size;
3645 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3647 if ((cum_lb + num) > bt_len) {
3649 sdev_printk(KERN_INFO, scp->device,
3650 "%s: %s: sum of blocks > data provided\n",
3652 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3654 ret = illegal_condition_result;
3655 goto err_out_unlock;
3659 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3660 int prot_ret = prot_verify_write(scp, lba, num,
3664 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3666 ret = illegal_condition_result;
3667 goto err_out_unlock;
3671 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3672 /* If ZBC zone then bump its write pointer */
3673 if (sdebug_dev_is_zoned(devip))
3674 zbc_inc_wp(devip, lba, num);
3675 if (unlikely(scsi_debug_lbp()))
3676 map_region(sip, lba, num);
3677 if (unlikely(-1 == ret)) {
3678 ret = DID_ERROR << 16;
3679 goto err_out_unlock;
3680 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3681 sdev_printk(KERN_INFO, scp->device,
3682 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3683 my_name, num_by, ret);
3685 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3686 atomic_read(&sdeb_inject_pending))) {
3687 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3688 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3689 atomic_set(&sdeb_inject_pending, 0);
3690 ret = check_condition_result;
3691 goto err_out_unlock;
3692 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3693 /* Logical block guard check failed */
3694 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3695 atomic_set(&sdeb_inject_pending, 0);
3696 ret = illegal_condition_result;
3697 goto err_out_unlock;
3698 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3699 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3700 atomic_set(&sdeb_inject_pending, 0);
3701 ret = illegal_condition_result;
3702 goto err_out_unlock;
3710 write_unlock(macc_lckp);
3716 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3717 u32 ei_lba, bool unmap, bool ndob)
3719 struct scsi_device *sdp = scp->device;
3720 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3721 unsigned long long i;
3723 u32 lb_size = sdebug_sector_size;
3725 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3726 scp->device->hostdata, true);
3727 rwlock_t *macc_lckp = &sip->macc_lck;
3731 write_lock(macc_lckp);
3733 ret = check_device_access_params(scp, lba, num, true);
3735 write_unlock(macc_lckp);
3739 if (unmap && scsi_debug_lbp()) {
3740 unmap_region(sip, lba, num);
3744 block = do_div(lbaa, sdebug_store_sectors);
3745 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3747 fs1p = fsp + (block * lb_size);
3749 memset(fs1p, 0, lb_size);
3752 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3755 write_unlock(&sip->macc_lck);
3756 return DID_ERROR << 16;
3757 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3758 sdev_printk(KERN_INFO, scp->device,
3759 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3760 my_name, "write same", lb_size, ret);
3762 /* Copy first sector to remaining blocks */
3763 for (i = 1 ; i < num ; i++) {
3765 block = do_div(lbaa, sdebug_store_sectors);
3766 memmove(fsp + (block * lb_size), fs1p, lb_size);
3768 if (scsi_debug_lbp())
3769 map_region(sip, lba, num);
3770 /* If ZBC zone then bump its write pointer */
3771 if (sdebug_dev_is_zoned(devip))
3772 zbc_inc_wp(devip, lba, num);
3774 write_unlock(macc_lckp);
3779 static int resp_write_same_10(struct scsi_cmnd *scp,
3780 struct sdebug_dev_info *devip)
3782 u8 *cmd = scp->cmnd;
3789 if (sdebug_lbpws10 == 0) {
3790 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3791 return check_condition_result;
3795 lba = get_unaligned_be32(cmd + 2);
3796 num = get_unaligned_be16(cmd + 7);
3797 if (num > sdebug_write_same_length) {
3798 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3799 return check_condition_result;
3801 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3804 static int resp_write_same_16(struct scsi_cmnd *scp,
3805 struct sdebug_dev_info *devip)
3807 u8 *cmd = scp->cmnd;
3814 if (cmd[1] & 0x8) { /* UNMAP */
3815 if (sdebug_lbpws == 0) {
3816 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3817 return check_condition_result;
3821 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3823 lba = get_unaligned_be64(cmd + 2);
3824 num = get_unaligned_be32(cmd + 10);
3825 if (num > sdebug_write_same_length) {
3826 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3827 return check_condition_result;
3829 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3832 /* Note the mode field is in the same position as the (lower) service action
3833 * field. For the Report supported operation codes command, SPC-4 suggests
3834 * each mode of this command should be reported separately; that is left for the future. */
3835 static int resp_write_buffer(struct scsi_cmnd *scp,
3836 struct sdebug_dev_info *devip)
3838 u8 *cmd = scp->cmnd;
3839 struct scsi_device *sdp = scp->device;
3840 struct sdebug_dev_info *dp;
3843 mode = cmd[1] & 0x1f;
3845 case 0x4: /* download microcode (MC) and activate (ACT) */
3846 /* set UAs on this device only */
3847 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3848 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3850 case 0x5: /* download MC, save and ACT */
3851 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3853 case 0x6: /* download MC with offsets and ACT */
3854 /* set UAs on most devices (LUs) in this target */
3855 list_for_each_entry(dp,
3856 &devip->sdbg_host->dev_info_list,
3858 if (dp->target == sdp->id) {
3859 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3861 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3865 case 0x7: /* download MC with offsets, save, and ACT */
3866 /* set UA on all devices (LUs) in this target */
3867 list_for_each_entry(dp,
3868 &devip->sdbg_host->dev_info_list,
3870 if (dp->target == sdp->id)
3871 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3875 /* do nothing for this command for other mode values */
3881 static int resp_comp_write(struct scsi_cmnd *scp,
3882 struct sdebug_dev_info *devip)
3884 u8 *cmd = scp->cmnd;
3886 struct sdeb_store_info *sip = devip2sip(devip, true);
3887 rwlock_t *macc_lckp = &sip->macc_lck;
3890 u32 lb_size = sdebug_sector_size;
3895 lba = get_unaligned_be64(cmd + 2);
3896 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3898 return 0; /* degenerate case, not an error */
3899 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3901 mk_sense_invalid_opcode(scp);
3902 return check_condition_result;
3904 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3905 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3906 (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR to DIF device\n");
3909 ret = check_device_access_params(scp, lba, num, false);
3913 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3915 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3917 return check_condition_result;
3920 write_lock(macc_lckp);
3922 ret = do_dout_fetch(scp, dnum, arr);
3924 retval = DID_ERROR << 16;
3926 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3927 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3928 "indicated=%u, IO sent=%d bytes\n", my_name,
3929 dnum * lb_size, ret);
3930 if (!comp_write_worker(sip, lba, num, arr, false)) {
3931 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3932 retval = check_condition_result;
3935 if (scsi_debug_lbp())
3936 map_region(sip, lba, num);
3938 write_unlock(macc_lckp);
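/* UNMAP parameter data: an 8 byte header followed by one or more 16 byte
 * block descriptors, each carrying an 8 byte starting LBA and a 4 byte
 * block count (plus 4 reserved bytes), as laid out below. */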
3943 struct unmap_block_desc {
3949 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3952 struct unmap_block_desc *desc;
3953 struct sdeb_store_info *sip = devip2sip(devip, true);
3954 rwlock_t *macc_lckp = &sip->macc_lck;
3955 unsigned int i, payload_len, descriptors;
3958 if (!scsi_debug_lbp())
		return 0;	/* fib and say it's done */
3960 payload_len = get_unaligned_be16(scp->cmnd + 7);
3961 BUG_ON(scsi_bufflen(scp) != payload_len);
3963 descriptors = (payload_len - 8) / 16;
3964 if (descriptors > sdebug_unmap_max_desc) {
3965 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3966 return check_condition_result;
3969 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3971 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3973 return check_condition_result;
3976 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3978 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3979 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3981 desc = (void *)&buf[8];
3983 write_lock(macc_lckp);
3985 for (i = 0 ; i < descriptors ; i++) {
3986 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3987 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3989 ret = check_device_access_params(scp, lba, num, true);
3993 unmap_region(sip, lba, num);
3999 write_unlock(macc_lckp);
4005 #define SDEBUG_GET_LBA_STATUS_LEN 32
4007 static int resp_get_lba_status(struct scsi_cmnd *scp,
4008 struct sdebug_dev_info *devip)
4010 u8 *cmd = scp->cmnd;
4012 u32 alloc_len, mapped, num;
4014 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4016 lba = get_unaligned_be64(cmd + 2);
4017 alloc_len = get_unaligned_be32(cmd + 10);
4022 ret = check_device_access_params(scp, lba, 1, false);
4026 if (scsi_debug_lbp()) {
4027 struct sdeb_store_info *sip = devip2sip(devip, true);
4029 mapped = map_state(sip, lba, &num);
4032 /* following just in case virtual_gb changed */
4033 sdebug_capacity = get_sdebug_capacity();
4034 if (sdebug_capacity - lba <= 0xffffffff)
4035 num = sdebug_capacity - lba;
4040 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4041 put_unaligned_be32(20, arr); /* Parameter Data Length */
4042 put_unaligned_be64(lba, arr + 8); /* LBA */
4043 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4044 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4046 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4049 static int resp_sync_cache(struct scsi_cmnd *scp,
4050 struct sdebug_dev_info *devip)
4055 u8 *cmd = scp->cmnd;
4057 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4058 lba = get_unaligned_be32(cmd + 2);
4059 num_blocks = get_unaligned_be16(cmd + 7);
4060 } else { /* SYNCHRONIZE_CACHE(16) */
4061 lba = get_unaligned_be64(cmd + 2);
4062 num_blocks = get_unaligned_be32(cmd + 10);
4064 if (lba + num_blocks > sdebug_capacity) {
4065 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4066 return check_condition_result;
4068 if (!write_since_sync || (cmd[1] & 0x2))
4069 res = SDEG_RES_IMMED_MASK;
4070 else /* delay if write_since_sync and IMMED clear */
4071 write_since_sync = false;
 * Assuming LBA+num_blocks is not out-of-range, this function returns
 * CONDITION MET if the specified blocks will fit (or already are) in the
 * cache, and GOOD status otherwise. We model a disk with a big cache and
 * always yield CONDITION MET. It actually tries to bring the range in main
 * memory into the cache associated with the CPU(s).
4082 static int resp_pre_fetch(struct scsi_cmnd *scp,
4083 struct sdebug_dev_info *devip)
4087 u64 block, rest = 0;
4089 u8 *cmd = scp->cmnd;
4090 struct sdeb_store_info *sip = devip2sip(devip, true);
4091 rwlock_t *macc_lckp = &sip->macc_lck;
4092 u8 *fsp = sip->storep;
4094 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4095 lba = get_unaligned_be32(cmd + 2);
4096 nblks = get_unaligned_be16(cmd + 7);
4097 } else { /* PRE-FETCH(16) */
4098 lba = get_unaligned_be64(cmd + 2);
4099 nblks = get_unaligned_be32(cmd + 10);
4101 if (lba + nblks > sdebug_capacity) {
4102 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4103 return check_condition_result;
4107 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4108 block = do_div(lba, sdebug_store_sectors);
4109 if (block + nblks > sdebug_store_sectors)
4110 rest = block + nblks - sdebug_store_sectors;
4112 /* Try to bring the PRE-FETCH range into CPU's cache */
4113 read_lock(macc_lckp);
4114 prefetch_range(fsp + (sdebug_sector_size * block),
4115 (nblks - rest) * sdebug_sector_size);
4117 prefetch_range(fsp, rest * sdebug_sector_size);
4118 read_unlock(macc_lckp);
4121 res = SDEG_RES_IMMED_MASK;
4122 return res | condition_met_result;
4125 #define RL_BUCKET_ELEMS 8
4127 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4128 * (W-LUN), the normal Linux scanning logic does not associate it with a
4129 * device (e.g. /dev/sg7). The following magic will make that association:
4130 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4131 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN with each target. To get a W-LUN for
 * target 2 only, use "echo '- 2 49409' > scan".
4135 static int resp_report_luns(struct scsi_cmnd *scp,
4136 struct sdebug_dev_info *devip)
4138 unsigned char *cmd = scp->cmnd;
4139 unsigned int alloc_len;
4140 unsigned char select_report;
4142 struct scsi_lun *lun_p;
4143 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4144 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4145 unsigned int wlun_cnt; /* report luns W-LUN count */
4146 unsigned int tlun_cnt; /* total LUN count */
4147 unsigned int rlen; /* response length (in bytes) */
4149 unsigned int off_rsp = 0;
4150 const int sz_lun = sizeof(struct scsi_lun);
4152 clear_luns_changed_on_target(devip);
4154 select_report = cmd[2];
4155 alloc_len = get_unaligned_be32(cmd + 6);
4157 if (alloc_len < 4) {
4158 pr_err("alloc len too small %d\n", alloc_len);
4159 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4160 return check_condition_result;
4163 switch (select_report) {
4164 case 0: /* all LUNs apart from W-LUNs */
4165 lun_cnt = sdebug_max_luns;
4168 case 1: /* only W-LUNs */
4172 case 2: /* all LUNs */
4173 lun_cnt = sdebug_max_luns;
4176 case 0x10: /* only administrative LUs */
4177 case 0x11: /* see SPC-5 */
	case 0x12: /* only subsidiary LUs owned by referenced LU */
4180 pr_debug("select report invalid %d\n", select_report);
4181 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4182 return check_condition_result;
4185 if (sdebug_no_lun_0 && (lun_cnt > 0))
4188 tlun_cnt = lun_cnt + wlun_cnt;
4189 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4190 scsi_set_resid(scp, scsi_bufflen(scp));
4191 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4192 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
	/* loops rely on the response header size matching sizeof(struct
	 * scsi_lun) (both are 8 bytes) */
4195 lun = sdebug_no_lun_0 ? 1 : 0;
4196 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4197 memset(arr, 0, sizeof(arr));
4198 lun_p = (struct scsi_lun *)&arr[0];
4200 put_unaligned_be32(rlen, &arr[0]);
4204 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4205 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4207 int_to_scsilun(lun++, lun_p);
4208 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4209 lun_p->scsi_lun[0] |= 0x40;
4211 if (j < RL_BUCKET_ELEMS)
4214 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4220 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4224 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
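/* VERIFY(10) and VERIFY(16). The BYTCHK field (cmd[1] bits 2:1) selects the
 * check: 0 -> medium verification only (always reported as good here);
 * 1 -> the data-out buffer is compared block for block with the store;
 * 2 -> reserved (rejected); 3 -> one block of data-out is compared
 * repeatedly against each block in the verification range. */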
4228 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4230 bool is_bytchk3 = false;
4233 u32 vnum, a_num, off;
4234 const u32 lb_size = sdebug_sector_size;
4237 u8 *cmd = scp->cmnd;
4238 struct sdeb_store_info *sip = devip2sip(devip, true);
4239 rwlock_t *macc_lckp = &sip->macc_lck;
4241 bytchk = (cmd[1] >> 1) & 0x3;
4243 return 0; /* always claim internal verify okay */
4244 } else if (bytchk == 2) {
4245 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4246 return check_condition_result;
4247 } else if (bytchk == 3) {
4248 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4252 lba = get_unaligned_be64(cmd + 2);
4253 vnum = get_unaligned_be32(cmd + 10);
4255 case VERIFY: /* is VERIFY(10) */
4256 lba = get_unaligned_be32(cmd + 2);
4257 vnum = get_unaligned_be16(cmd + 7);
4260 mk_sense_invalid_opcode(scp);
4261 return check_condition_result;
4264 return 0; /* not an error */
4265 a_num = is_bytchk3 ? 1 : vnum;
4266 /* Treat following check like one for read (i.e. no write) access */
4267 ret = check_device_access_params(scp, lba, a_num, false);
4271 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4273 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4275 return check_condition_result;
4277 /* Not changing store, so only need read access */
4278 read_lock(macc_lckp);
4280 ret = do_dout_fetch(scp, a_num, arr);
4282 ret = DID_ERROR << 16;
4284 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4285 sdev_printk(KERN_INFO, scp->device,
4286 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4287 my_name, __func__, a_num * lb_size, ret);
4290 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4291 memcpy(arr + off, arr, lb_size);
4294 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4295 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4296 ret = check_condition_result;
4300 read_unlock(macc_lckp);
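/* REPORT ZONES reply: a 64 byte header (zone list length at offset 0,
 * maximum LBA at offset 8) followed by 64 byte zone descriptors, hence the
 * shared size constant below. */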
4305 #define RZONES_DESC_HD 64
/* Report zones, depending on the start LBA and reporting options */
4308 static int resp_report_zones(struct scsi_cmnd *scp,
4309 struct sdebug_dev_info *devip)
4311 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4313 u32 alloc_len, rep_opts, rep_len;
4316 u8 *arr = NULL, *desc;
4317 u8 *cmd = scp->cmnd;
4318 struct sdeb_zone_state *zsp;
4319 struct sdeb_store_info *sip = devip2sip(devip, false);
4320 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4322 if (!sdebug_dev_is_zoned(devip)) {
4323 mk_sense_invalid_opcode(scp);
4324 return check_condition_result;
4326 zs_lba = get_unaligned_be64(cmd + 2);
4327 alloc_len = get_unaligned_be32(cmd + 10);
4329 return 0; /* not an error */
4330 rep_opts = cmd[14] & 0x3f;
4331 partial = cmd[14] & 0x80;
4333 if (zs_lba >= sdebug_capacity) {
4334 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4335 return check_condition_result;
4338 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4339 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4342 arr = kzalloc(alloc_len, GFP_ATOMIC);
4344 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4346 return check_condition_result;
4349 read_lock(macc_lckp);
4352 for (i = 0; i < max_zones; i++) {
4353 lba = zs_lba + devip->zsize * i;
4354 if (lba > sdebug_capacity)
4356 zsp = zbc_zone(devip, lba);
4363 if (zsp->z_cond != ZC1_EMPTY)
4367 /* Implicit open zones */
4368 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4372 /* Explicit open zones */
4373 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4378 if (zsp->z_cond != ZC4_CLOSED)
4383 if (zsp->z_cond != ZC5_FULL)
4390 * Read-only, offline, reset WP recommended are
4391 * not emulated: no zones to report;
4395 /* non-seq-resource set */
4396 if (!zsp->z_non_seq_resource)
4400 /* Not write pointer (conventional) zones */
4401 if (!zbc_zone_is_conv(zsp))
4405 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4406 INVALID_FIELD_IN_CDB, 0);
4407 ret = check_condition_result;
4411 if (nrz < rep_max_zones) {
4412 /* Fill zone descriptor */
4413 desc[0] = zsp->z_type;
4414 desc[1] = zsp->z_cond << 4;
4415 if (zsp->z_non_seq_resource)
4417 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4418 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4419 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4423 if (partial && nrz >= rep_max_zones)
4430 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4431 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4433 rep_len = (unsigned long)desc - (unsigned long)arr;
4434 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4437 read_unlock(macc_lckp);
4442 /* Logic transplanted from tcmu-runner, file_zbc.c */
4443 static void zbc_open_all(struct sdebug_dev_info *devip)
4445 struct sdeb_zone_state *zsp = &devip->zstate[0];
4448 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4449 if (zsp->z_cond == ZC4_CLOSED)
4450 zbc_open_zone(devip, &devip->zstate[i], true);
4454 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4458 enum sdebug_z_cond zc;
4459 u8 *cmd = scp->cmnd;
4460 struct sdeb_zone_state *zsp;
4461 bool all = cmd[14] & 0x01;
4462 struct sdeb_store_info *sip = devip2sip(devip, false);
4463 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4465 if (!sdebug_dev_is_zoned(devip)) {
4466 mk_sense_invalid_opcode(scp);
4467 return check_condition_result;
4470 write_lock(macc_lckp);
4473 /* Check if all closed zones can be open */
4474 if (devip->max_open &&
4475 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4476 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4478 res = check_condition_result;
4481 /* Open all closed zones */
4482 zbc_open_all(devip);
4486 /* Open the specified zone */
4487 z_id = get_unaligned_be64(cmd + 2);
4488 if (z_id >= sdebug_capacity) {
4489 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4490 res = check_condition_result;
4494 zsp = zbc_zone(devip, z_id);
4495 if (z_id != zsp->z_start) {
4496 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4497 res = check_condition_result;
4500 if (zbc_zone_is_conv(zsp)) {
4501 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4502 res = check_condition_result;
4507 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4510 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4511 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4513 res = check_condition_result;
4517 zbc_open_zone(devip, zsp, true);
4519 write_unlock(macc_lckp);
4523 static void zbc_close_all(struct sdebug_dev_info *devip)
4527 for (i = 0; i < devip->nr_zones; i++)
4528 zbc_close_zone(devip, &devip->zstate[i]);
4531 static int resp_close_zone(struct scsi_cmnd *scp,
4532 struct sdebug_dev_info *devip)
4536 u8 *cmd = scp->cmnd;
4537 struct sdeb_zone_state *zsp;
4538 bool all = cmd[14] & 0x01;
4539 struct sdeb_store_info *sip = devip2sip(devip, false);
4540 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4542 if (!sdebug_dev_is_zoned(devip)) {
4543 mk_sense_invalid_opcode(scp);
4544 return check_condition_result;
4547 write_lock(macc_lckp);
4550 zbc_close_all(devip);
4554 /* Close specified zone */
4555 z_id = get_unaligned_be64(cmd + 2);
4556 if (z_id >= sdebug_capacity) {
4557 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4558 res = check_condition_result;
4562 zsp = zbc_zone(devip, z_id);
4563 if (z_id != zsp->z_start) {
4564 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4565 res = check_condition_result;
4568 if (zbc_zone_is_conv(zsp)) {
4569 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4570 res = check_condition_result;
4574 zbc_close_zone(devip, zsp);
4576 write_unlock(macc_lckp);
4580 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4581 struct sdeb_zone_state *zsp, bool empty)
4583 enum sdebug_z_cond zc = zsp->z_cond;
4585 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4586 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4587 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4588 zbc_close_zone(devip, zsp);
4589 if (zsp->z_cond == ZC4_CLOSED)
4591 zsp->z_wp = zsp->z_start + zsp->z_size;
4592 zsp->z_cond = ZC5_FULL;
4596 static void zbc_finish_all(struct sdebug_dev_info *devip)
4600 for (i = 0; i < devip->nr_zones; i++)
4601 zbc_finish_zone(devip, &devip->zstate[i], false);
4604 static int resp_finish_zone(struct scsi_cmnd *scp,
4605 struct sdebug_dev_info *devip)
4607 struct sdeb_zone_state *zsp;
4610 u8 *cmd = scp->cmnd;
4611 bool all = cmd[14] & 0x01;
4612 struct sdeb_store_info *sip = devip2sip(devip, false);
4613 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4615 if (!sdebug_dev_is_zoned(devip)) {
4616 mk_sense_invalid_opcode(scp);
4617 return check_condition_result;
4620 write_lock(macc_lckp);
4623 zbc_finish_all(devip);
4627 /* Finish the specified zone */
4628 z_id = get_unaligned_be64(cmd + 2);
4629 if (z_id >= sdebug_capacity) {
4630 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4631 res = check_condition_result;
4635 zsp = zbc_zone(devip, z_id);
4636 if (z_id != zsp->z_start) {
4637 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4638 res = check_condition_result;
4641 if (zbc_zone_is_conv(zsp)) {
4642 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4643 res = check_condition_result;
4647 zbc_finish_zone(devip, zsp, true);
4649 write_unlock(macc_lckp);
4653 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4654 struct sdeb_zone_state *zsp)
4656 enum sdebug_z_cond zc;
4657 struct sdeb_store_info *sip = devip2sip(devip, false);
4659 if (zbc_zone_is_conv(zsp))
4663 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4664 zbc_close_zone(devip, zsp);
4666 if (zsp->z_cond == ZC4_CLOSED)
4669 if (zsp->z_wp > zsp->z_start)
4670 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4671 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4673 zsp->z_non_seq_resource = false;
4674 zsp->z_wp = zsp->z_start;
4675 zsp->z_cond = ZC1_EMPTY;
4678 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4682 for (i = 0; i < devip->nr_zones; i++)
4683 zbc_rwp_zone(devip, &devip->zstate[i]);
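/* RESET WRITE POINTER: returns the zone (or, with the ALL bit set, every
 * sequential zone) to the EMPTY condition and zero-fills any data written
 * beyond the zone start (see zbc_rwp_zone() above). */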
4686 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4688 struct sdeb_zone_state *zsp;
4691 u8 *cmd = scp->cmnd;
4692 bool all = cmd[14] & 0x01;
4693 struct sdeb_store_info *sip = devip2sip(devip, false);
4694 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4696 if (!sdebug_dev_is_zoned(devip)) {
4697 mk_sense_invalid_opcode(scp);
4698 return check_condition_result;
4701 write_lock(macc_lckp);
4708 z_id = get_unaligned_be64(cmd + 2);
4709 if (z_id >= sdebug_capacity) {
4710 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4711 res = check_condition_result;
4715 zsp = zbc_zone(devip, z_id);
4716 if (z_id != zsp->z_start) {
4717 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4718 res = check_condition_result;
4721 if (zbc_zone_is_conv(zsp)) {
4722 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4723 res = check_condition_result;
4727 zbc_rwp_zone(devip, zsp);
4729 write_unlock(macc_lckp);
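/* Map a command to its submission queue via the block layer's unique tag,
 * which encodes the hardware queue (hwq) index in its upper 16 bits. */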
4733 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4736 u32 tag = blk_mq_unique_tag(cmnd->request);
4738 hwq = blk_mq_unique_tag_to_hwq(tag);
4740 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4741 if (WARN_ON_ONCE(hwq >= submit_queues))
4744 return sdebug_q_arr + hwq;
4747 static u32 get_tag(struct scsi_cmnd *cmnd)
4749 return blk_mq_unique_tag(cmnd->request);
4752 /* Queued (deferred) command completions converge here. */
4753 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4755 bool aborted = sd_dp->aborted;
4758 unsigned long iflags;
4759 struct sdebug_queue *sqp;
4760 struct sdebug_queued_cmd *sqcp;
4761 struct scsi_cmnd *scp;
4762 struct sdebug_dev_info *devip;
4764 sd_dp->defer_t = SDEB_DEFER_NONE;
4765 if (unlikely(aborted))
4766 sd_dp->aborted = false;
4767 qc_idx = sd_dp->qc_idx;
4768 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4769 if (sdebug_statistics) {
4770 atomic_inc(&sdebug_completions);
4771 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4772 atomic_inc(&sdebug_miss_cpus);
4774 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4775 pr_err("wild qc_idx=%d\n", qc_idx);
4778 spin_lock_irqsave(&sqp->qc_lock, iflags);
4779 sqcp = &sqp->qc_arr[qc_idx];
4781 if (unlikely(scp == NULL)) {
4782 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4783 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4784 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4787 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4789 atomic_dec(&devip->num_in_q);
4791 pr_err("devip=NULL\n");
4792 if (unlikely(atomic_read(&retired_max_queue) > 0))
4795 sqcp->a_cmnd = NULL;
4796 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4797 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4798 pr_err("Unexpected completion\n");
4802 if (unlikely(retiring)) { /* user has reduced max_queue */
4805 retval = atomic_read(&retired_max_queue);
4806 if (qc_idx >= retval) {
4807 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4808 pr_err("index %d too large\n", retval);
4811 k = find_last_bit(sqp->in_use_bm, retval);
4812 if ((k < sdebug_max_queue) || (k == retval))
4813 atomic_set(&retired_max_queue, 0);
4815 atomic_set(&retired_max_queue, k + 1);
4817 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4818 if (unlikely(aborted)) {
4820 pr_info("bypassing scsi_done() due to aborted cmd\n");
4823 scp->scsi_done(scp); /* callback to mid level */
/* When the high resolution timer fires, this function is called. */
4827 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4829 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4831 sdebug_q_cmd_complete(sd_dp);
4832 return HRTIMER_NORESTART;
/* When the work queue runs scheduled work, it calls this function. */
4836 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4838 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4840 sdebug_q_cmd_complete(sd_dp);
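/* Used when uuid_ctl=2: the first device created generates one UUID which
 * every subsequently created device then shares as its LU name. */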
4843 static bool got_shared_uuid;
4844 static uuid_t shared_uuid;
4846 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4848 struct sdeb_zone_state *zsp;
4849 sector_t capacity = get_sdebug_capacity();
4850 sector_t zstart = 0;
4854 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4855 * a zone size allowing for at least 4 zones on the device. Otherwise,
4856 * use the specified zone size checking that at least 2 zones can be
4857 * created for the device.
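 * For example (illustrative numbers only): a capacity of 524288 sectors of
 * 512 bytes with zone_size_mb=4 gives zsize = 4 * SZ_1M / 512 = 8192
 * blocks and nr_zones = 524288 / 8192 = 64.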
4859 if (!sdeb_zbc_zone_size_mb) {
4860 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4861 >> ilog2(sdebug_sector_size);
4862 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4864 if (devip->zsize < 2) {
4865 pr_err("Device capacity too small\n");
4869 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4870 pr_err("Zone size is not a power of 2\n");
4873 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4874 >> ilog2(sdebug_sector_size);
4875 if (devip->zsize >= capacity) {
4876 pr_err("Zone size too large for device capacity\n");
4881 devip->zsize_shift = ilog2(devip->zsize);
4882 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4884 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4885 pr_err("Number of conventional zones too large\n");
4888 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4890 if (devip->zmodel == BLK_ZONED_HM) {
		/* sdeb_zbc_max_open can be 0, meaning "not reported" (no limit) */
4892 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4893 devip->max_open = (devip->nr_zones - 1) / 2;
4895 devip->max_open = sdeb_zbc_max_open;
4898 devip->zstate = kcalloc(devip->nr_zones,
4899 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4903 for (i = 0; i < devip->nr_zones; i++) {
4904 zsp = &devip->zstate[i];
4906 zsp->z_start = zstart;
4908 if (i < devip->nr_conv_zones) {
4909 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4910 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4911 zsp->z_wp = (sector_t)-1;
4913 if (devip->zmodel == BLK_ZONED_HM)
4914 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4916 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4917 zsp->z_cond = ZC1_EMPTY;
4918 zsp->z_wp = zsp->z_start;
4921 if (zsp->z_start + devip->zsize < capacity)
4922 zsp->z_size = devip->zsize;
4924 zsp->z_size = capacity - zsp->z_start;
4926 zstart += zsp->z_size;
4932 static struct sdebug_dev_info *sdebug_device_create(
4933 struct sdebug_host_info *sdbg_host, gfp_t flags)
4935 struct sdebug_dev_info *devip;
4937 devip = kzalloc(sizeof(*devip), flags);
4939 if (sdebug_uuid_ctl == 1)
4940 uuid_gen(&devip->lu_name);
4941 else if (sdebug_uuid_ctl == 2) {
4942 if (got_shared_uuid)
4943 devip->lu_name = shared_uuid;
4945 uuid_gen(&shared_uuid);
4946 got_shared_uuid = true;
4947 devip->lu_name = shared_uuid;
4950 devip->sdbg_host = sdbg_host;
4951 if (sdeb_zbc_in_use) {
4952 devip->zmodel = sdeb_zbc_model;
4953 if (sdebug_device_create_zones(devip)) {
4958 devip->zmodel = BLK_ZONED_NONE;
4960 devip->sdbg_host = sdbg_host;
4961 devip->create_ts = ktime_get_boottime();
4962 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4963 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4968 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4970 struct sdebug_host_info *sdbg_host;
4971 struct sdebug_dev_info *open_devip = NULL;
4972 struct sdebug_dev_info *devip;
4974 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4976 pr_err("Host info NULL\n");
4980 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4981 if ((devip->used) && (devip->channel == sdev->channel) &&
4982 (devip->target == sdev->id) &&
4983 (devip->lun == sdev->lun))
4986 if ((!devip->used) && (!open_devip))
4990 if (!open_devip) { /* try and make a new one */
4991 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4993 pr_err("out of memory at line %d\n", __LINE__);
4998 open_devip->channel = sdev->channel;
4999 open_devip->target = sdev->id;
5000 open_devip->lun = sdev->lun;
5001 open_devip->sdbg_host = sdbg_host;
5002 atomic_set(&open_devip->num_in_q, 0);
5003 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5004 open_devip->used = true;
5008 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5011 pr_info("slave_alloc <%u %u %u %llu>\n",
5012 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5016 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5018 struct sdebug_dev_info *devip =
5019 (struct sdebug_dev_info *)sdp->hostdata;
5022 pr_info("slave_configure <%u %u %u %llu>\n",
5023 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5024 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5025 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5026 if (devip == NULL) {
5027 devip = find_build_dev_info(sdp);
5029 return 1; /* no resources, will be marked offline */
5031 sdp->hostdata = devip;
5033 sdp->no_uld_attach = 1;
5034 config_cdb_len(sdp);
5038 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5040 struct sdebug_dev_info *devip =
5041 (struct sdebug_dev_info *)sdp->hostdata;
5044 pr_info("slave_destroy <%u %u %u %llu>\n",
5045 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5047 /* make this slot available for re-use */
5048 devip->used = false;
5049 sdp->hostdata = NULL;
5053 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5054 enum sdeb_defer_type defer_t)
5058 if (defer_t == SDEB_DEFER_HRT)
5059 hrtimer_cancel(&sd_dp->hrt);
5060 else if (defer_t == SDEB_DEFER_WQ)
5061 cancel_work_sync(&sd_dp->ew.work);
/* If @cmnd is found, delete its timer or work queue and return true; else
 * return false. */
5066 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5068 unsigned long iflags;
5069 int j, k, qmax, r_qmax;
5070 enum sdeb_defer_type l_defer_t;
5071 struct sdebug_queue *sqp;
5072 struct sdebug_queued_cmd *sqcp;
5073 struct sdebug_dev_info *devip;
5074 struct sdebug_defer *sd_dp;
5076 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5077 spin_lock_irqsave(&sqp->qc_lock, iflags);
5078 qmax = sdebug_max_queue;
5079 r_qmax = atomic_read(&retired_max_queue);
5082 for (k = 0; k < qmax; ++k) {
5083 if (test_bit(k, sqp->in_use_bm)) {
5084 sqcp = &sqp->qc_arr[k];
5085 if (cmnd != sqcp->a_cmnd)
5088 devip = (struct sdebug_dev_info *)
5089 cmnd->device->hostdata;
5091 atomic_dec(&devip->num_in_q);
5092 sqcp->a_cmnd = NULL;
5093 sd_dp = sqcp->sd_dp;
5095 l_defer_t = sd_dp->defer_t;
5096 sd_dp->defer_t = SDEB_DEFER_NONE;
5098 l_defer_t = SDEB_DEFER_NONE;
5099 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5100 stop_qc_helper(sd_dp, l_defer_t);
5101 clear_bit(k, sqp->in_use_bm);
5105 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5110 /* Deletes (stops) timers or work queues of all queued commands */
5111 static void stop_all_queued(void)
5113 unsigned long iflags;
5115 enum sdeb_defer_type l_defer_t;
5116 struct sdebug_queue *sqp;
5117 struct sdebug_queued_cmd *sqcp;
5118 struct sdebug_dev_info *devip;
5119 struct sdebug_defer *sd_dp;
5121 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5122 spin_lock_irqsave(&sqp->qc_lock, iflags);
5123 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5124 if (test_bit(k, sqp->in_use_bm)) {
5125 sqcp = &sqp->qc_arr[k];
5126 if (sqcp->a_cmnd == NULL)
5128 devip = (struct sdebug_dev_info *)
5129 sqcp->a_cmnd->device->hostdata;
5131 atomic_dec(&devip->num_in_q);
5132 sqcp->a_cmnd = NULL;
5133 sd_dp = sqcp->sd_dp;
5135 l_defer_t = sd_dp->defer_t;
5136 sd_dp->defer_t = SDEB_DEFER_NONE;
5138 l_defer_t = SDEB_DEFER_NONE;
5139 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5140 stop_qc_helper(sd_dp, l_defer_t);
5141 clear_bit(k, sqp->in_use_bm);
5142 spin_lock_irqsave(&sqp->qc_lock, iflags);
5145 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5149 /* Free queued command memory on heap */
5150 static void free_all_queued(void)
5153 struct sdebug_queue *sqp;
5154 struct sdebug_queued_cmd *sqcp;
5156 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5157 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5158 sqcp = &sqp->qc_arr[k];
5165 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5171 ok = stop_queued_cmnd(SCpnt);
5172 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5173 sdev_printk(KERN_INFO, SCpnt->device,
5174 "%s: command%s found\n", __func__,
5180 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5183 if (SCpnt && SCpnt->device) {
5184 struct scsi_device *sdp = SCpnt->device;
5185 struct sdebug_dev_info *devip =
5186 (struct sdebug_dev_info *)sdp->hostdata;
5188 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5189 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5191 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5196 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5198 struct sdebug_host_info *sdbg_host;
5199 struct sdebug_dev_info *devip;
5200 struct scsi_device *sdp;
5201 struct Scsi_Host *hp;
5204 ++num_target_resets;
5207 sdp = SCpnt->device;
5210 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5211 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5215 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5217 list_for_each_entry(devip,
5218 &sdbg_host->dev_info_list,
5220 if (devip->target == sdp->id) {
5221 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5225 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5226 sdev_printk(KERN_INFO, sdp,
5227 "%s: %d device(s) found in target\n", __func__, k);
5232 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5234 struct sdebug_host_info *sdbg_host;
5235 struct sdebug_dev_info *devip;
5236 struct scsi_device *sdp;
5237 struct Scsi_Host *hp;
5241 if (!(SCpnt && SCpnt->device))
5243 sdp = SCpnt->device;
5244 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5245 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5248 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5250 list_for_each_entry(devip,
5251 &sdbg_host->dev_info_list,
5253 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5258 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5259 sdev_printk(KERN_INFO, sdp,
5260 "%s: %d device(s) found in host\n", __func__, k);
5265 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5267 struct sdebug_host_info *sdbg_host;
5268 struct sdebug_dev_info *devip;
5272 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5273 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5274 spin_lock(&sdebug_host_list_lock);
5275 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5276 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5278 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5282 spin_unlock(&sdebug_host_list_lock);
5284 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5285 sdev_printk(KERN_INFO, SCpnt->device,
5286 "%s: %d device(s) found\n", __func__, k);
5290 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5292 struct msdos_partition *pp;
5293 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5294 int sectors_per_part, num_sectors, k;
5295 int heads_by_sects, start_sec, end_sec;
5297 /* assume partition table already zeroed */
5298 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5300 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5301 sdebug_num_parts = SDEBUG_MAX_PARTS;
5302 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5304 num_sectors = (int)get_sdebug_capacity();
5305 sectors_per_part = (num_sectors - sdebug_sectors_per)
5307 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5308 starts[0] = sdebug_sectors_per;
5309 max_part_secs = sectors_per_part;
5310 for (k = 1; k < sdebug_num_parts; ++k) {
5311 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5313 if (starts[k] - starts[k - 1] < max_part_secs)
5314 max_part_secs = starts[k] - starts[k - 1];
5316 starts[sdebug_num_parts] = num_sectors;
5317 starts[sdebug_num_parts + 1] = 0;
5319 ramp[510] = 0x55; /* magic partition markings */
5321 pp = (struct msdos_partition *)(ramp + 0x1be);
5322 for (k = 0; starts[k + 1]; ++k, ++pp) {
5323 start_sec = starts[k];
5324 end_sec = starts[k] + max_part_secs - 1;
5327 pp->cyl = start_sec / heads_by_sects;
5328 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5329 / sdebug_sectors_per;
5330 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5332 pp->end_cyl = end_sec / heads_by_sects;
5333 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5334 / sdebug_sectors_per;
5335 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5337 pp->start_sect = cpu_to_le32(start_sec);
5338 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5339 pp->sys_ind = 0x83; /* plain Linux partition */
5343 static void block_unblock_all_queues(bool block)
5346 struct sdebug_queue *sqp;
5348 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5349 atomic_set(&sqp->blocked, (int)block);
5352 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5353 * commands will be processed normally before triggers occur.
5355 static void tweak_cmnd_count(void)
5359 modulo = abs(sdebug_every_nth);
5362 block_unblock_all_queues(true);
5363 count = atomic_read(&sdebug_cmnd_count);
5364 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5365 block_unblock_all_queues(false);
5368 static void clear_queue_stats(void)
5370 atomic_set(&sdebug_cmnd_count, 0);
5371 atomic_set(&sdebug_completions, 0);
5372 atomic_set(&sdebug_miss_cpus, 0);
5373 atomic_set(&sdebug_a_tsf, 0);
5376 static bool inject_on_this_cmd(void)
5378 if (sdebug_every_nth == 0)
5380 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5383 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command directly (by calling the
 * mid-level's scsi_done() callback) or schedules a hrtimer or work queue
 * item, then returns 0. Returns SCSI_MLQUEUE_HOST_BUSY if temporarily out
 * of resources.
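/* Summary of the delay plumbing below: delta_jiff == 0 -> respond in the
 * caller's thread; delta_jiff > 0 (or ndelay > 0) -> complete later from a
 * high resolution timer; delta_jiff < 0 -> complete later from a work
 * queue item. */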
5390 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5392 int (*pfp)(struct scsi_cmnd *,
5393 struct sdebug_dev_info *),
5394 int delta_jiff, int ndelay)
5397 bool inject = false;
5398 int k, num_in_q, qdepth;
5399 unsigned long iflags;
5400 u64 ns_from_boot = 0;
5401 struct sdebug_queue *sqp;
5402 struct sdebug_queued_cmd *sqcp;
5403 struct scsi_device *sdp;
5404 struct sdebug_defer *sd_dp;
5406 if (unlikely(devip == NULL)) {
5407 if (scsi_result == 0)
5408 scsi_result = DID_NO_CONNECT << 16;
5409 goto respond_in_thread;
5413 if (delta_jiff == 0)
5414 goto respond_in_thread;
5416 sqp = get_queue(cmnd);
5417 spin_lock_irqsave(&sqp->qc_lock, iflags);
5418 if (unlikely(atomic_read(&sqp->blocked))) {
5419 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5420 return SCSI_MLQUEUE_HOST_BUSY;
5422 num_in_q = atomic_read(&devip->num_in_q);
5423 qdepth = cmnd->device->queue_depth;
5424 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5426 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5427 goto respond_in_thread;
5429 scsi_result = device_qfull_result;
5430 } else if (unlikely(sdebug_every_nth &&
5431 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5432 (scsi_result == 0))) {
5433 if ((num_in_q == (qdepth - 1)) &&
5434 (atomic_inc_return(&sdebug_a_tsf) >=
5435 abs(sdebug_every_nth))) {
5436 atomic_set(&sdebug_a_tsf, 0);
5438 scsi_result = device_qfull_result;
5442 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5443 if (unlikely(k >= sdebug_max_queue)) {
5444 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5446 goto respond_in_thread;
5447 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5448 scsi_result = device_qfull_result;
5449 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5450 sdev_printk(KERN_INFO, sdp,
5451 "%s: max_queue=%d exceeded, %s\n",
5452 __func__, sdebug_max_queue,
5453 (scsi_result ? "status: TASK SET FULL" :
5454 "report: host busy"));
5456 goto respond_in_thread;
5458 return SCSI_MLQUEUE_HOST_BUSY;
5460 set_bit(k, sqp->in_use_bm);
5461 atomic_inc(&devip->num_in_q);
5462 sqcp = &sqp->qc_arr[k];
5463 sqcp->a_cmnd = cmnd;
5464 cmnd->host_scribble = (unsigned char *)sqcp;
5465 sd_dp = sqcp->sd_dp;
5466 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5468 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5470 atomic_dec(&devip->num_in_q);
5471 clear_bit(k, sqp->in_use_bm);
5472 return SCSI_MLQUEUE_HOST_BUSY;
5479 /* Set the hostwide tag */
5480 if (sdebug_host_max_queue)
5481 sd_dp->hc_idx = get_tag(cmnd);
5483 if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5484 ns_from_boot = ktime_get_boottime_ns();
5486 /* one of the resp_*() response functions is called here */
5487 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5488 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5489 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5490 delta_jiff = ndelay = 0;
5492 if (cmnd->result == 0 && scsi_result != 0)
5493 cmnd->result = scsi_result;
5494 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5495 if (atomic_read(&sdeb_inject_pending)) {
5496 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5497 atomic_set(&sdeb_inject_pending, 0);
5498 cmnd->result = check_condition_result;
5502 if (unlikely(sdebug_verbose && cmnd->result))
5503 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5504 __func__, cmnd->result);
5506 if (delta_jiff > 0 || ndelay > 0) {
5509 if (delta_jiff > 0) {
5510 u64 ns = jiffies_to_nsecs(delta_jiff);
5512 if (sdebug_random && ns < U32_MAX) {
5513 ns = prandom_u32_max((u32)ns);
5514 } else if (sdebug_random) {
5515 ns >>= 12; /* scale to 4 usec precision */
5516 if (ns < U32_MAX) /* over 4 hours max */
5517 ns = prandom_u32_max((u32)ns);
5520 kt = ns_to_ktime(ns);
5521 } else { /* ndelay has a 4.2 second max */
5522 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5524 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5525 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5527 if (kt <= d) { /* elapsed duration >= kt */
5528 spin_lock_irqsave(&sqp->qc_lock, iflags);
5529 sqcp->a_cmnd = NULL;
5530 atomic_dec(&devip->num_in_q);
5531 clear_bit(k, sqp->in_use_bm);
5532 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5535 /* call scsi_done() from this thread */
5536 cmnd->scsi_done(cmnd);
5539 /* otherwise reduce kt by elapsed time */
5543 if (!sd_dp->init_hrt) {
5544 sd_dp->init_hrt = true;
5545 sqcp->sd_dp = sd_dp;
5546 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5547 HRTIMER_MODE_REL_PINNED);
5548 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5549 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5552 if (sdebug_statistics)
5553 sd_dp->issuing_cpu = raw_smp_processor_id();
5554 sd_dp->defer_t = SDEB_DEFER_HRT;
5555 /* schedule the invocation of scsi_done() for a later time */
5556 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5557 } else { /* jdelay < 0, use work queue */
5558 if (!sd_dp->init_wq) {
5559 sd_dp->init_wq = true;
5560 sqcp->sd_dp = sd_dp;
5561 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5563 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5565 if (sdebug_statistics)
5566 sd_dp->issuing_cpu = raw_smp_processor_id();
5567 sd_dp->defer_t = SDEB_DEFER_WQ;
5568 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5569 atomic_read(&sdeb_inject_pending)))
5570 sd_dp->aborted = true;
5571 schedule_work(&sd_dp->ew.work);
5572 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5573 atomic_read(&sdeb_inject_pending))) {
5574 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
5575 blk_abort_request(cmnd->request);
5576 atomic_set(&sdeb_inject_pending, 0);
5579 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5580 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5581 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5584 respond_in_thread: /* call back to mid-layer using invocation thread */
5585 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5586 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5587 if (cmnd->result == 0 && scsi_result != 0)
5588 cmnd->result = scsi_result;
5589 cmnd->scsi_done(cmnd);
5593 /* Note: The following macros create attribute files in the
5594 /sys/module/scsi_debug/parameters directory. Unfortunately this
5595 driver is unaware of a change and cannot trigger auxiliary actions
5596 as it can when the corresponding attribute in the
5597 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
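   For example, a writable attribute in the latter directory can be
   changed at run time with:
     echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts
   which enables verbose "noise" logging (see opts_store() below).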
5599 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5600 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5601 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5602 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5603 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5604 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5605 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5606 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5607 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5608 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5609 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5610 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5611 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5612 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5613 module_param_string(inq_product, sdebug_inq_product_id,
5614 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5615 module_param_string(inq_rev, sdebug_inq_product_rev,
5616 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5617 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5618 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5619 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5620 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5621 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5622 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5623 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5624 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5625 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5626 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5627 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5629 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5631 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5632 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5633 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5634 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5635 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5636 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5637 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5638 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5639 module_param_named(per_host_store, sdebug_per_host_store, bool,
5641 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5642 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5643 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5644 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5645 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5646 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5647 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5648 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5649 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5650 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5651 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5652 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5653 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5654 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5655 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5656 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5657 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5659 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5660 module_param_named(write_same_length, sdebug_write_same_length, int,
5662 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5663 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5664 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5665 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5667 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5668 MODULE_DESCRIPTION("SCSI debug adapter driver");
5669 MODULE_LICENSE("GPL");
5670 MODULE_VERSION(SDEBUG_VERSION);
5672 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5673 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5674 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5675 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5676 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5677 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5678 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5679 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5680 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5681 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5682 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5683 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5684 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5685 MODULE_PARM_DESC(host_max_queue,
5686 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5687 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5688 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5689 SDEBUG_VERSION "\")");
5690 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5691 MODULE_PARM_DESC(lbprz,
5692 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5693 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5694 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5695 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5696 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5697 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5698 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5699 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5700 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5701 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5702 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5703 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5705 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5706 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5707 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5708 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5709 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5710 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5711 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5712 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5713 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5714 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5715 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5716 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5717 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5718 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5719 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5720 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5721 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5722 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5723 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5724 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5725 MODULE_PARM_DESC(uuid_ctl,
5726 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5727 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5728 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5729 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5730 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5731 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5732 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5733 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5734 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
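/* Example invocation (illustrative values): a 256 MiB shared store, four
 * targets per host and logical block provisioning (UNMAP and WRITE SAME
 * with the UNMAP bit) enabled:
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=4 lbpu=1 lbpws=1
 */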
5736 #define SDEBUG_INFO_LEN 256
5737 static char sdebug_info[SDEBUG_INFO_LEN];
5739 static const char *scsi_debug_info(struct Scsi_Host *shp)
5743 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5744 my_name, SDEBUG_VERSION, sdebug_version_date);
5745 if (k >= (SDEBUG_INFO_LEN - 1))
5747 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5748 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5749 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5750 "statistics", (int)sdebug_statistics);
5754 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5755 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5760 int minLen = length > 15 ? 15 : length;
5762 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5764 memcpy(arr, buffer, minLen);
5766 if (1 != sscanf(arr, "%d", &opts))
5769 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5770 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5771 if (sdebug_every_nth != 0)
5776 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomic, so they might be inaccurate on a busy system. */
5779 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5782 struct sdebug_queue *sqp;
5783 struct sdebug_host_info *sdhp;
5785 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5786 SDEBUG_VERSION, sdebug_version_date);
5787 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5788 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5789 sdebug_opts, sdebug_every_nth);
5790 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5791 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5792 sdebug_sector_size, "bytes");
5793 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5794 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5796 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5797 num_dev_resets, num_target_resets, num_bus_resets,
5799 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5800 dix_reads, dix_writes, dif_errors);
5801 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5803 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5804 atomic_read(&sdebug_cmnd_count),
5805 atomic_read(&sdebug_completions),
5806 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5807 atomic_read(&sdebug_a_tsf));
5809 seq_printf(m, "submit_queues=%d\n", submit_queues);
5810 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5811 seq_printf(m, " queue %d:\n", j);
5812 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5813 if (f != sdebug_max_queue) {
5814 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5815 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5816 "first,last bits", f, l);
5820 seq_printf(m, "this host_no=%d\n", host->host_no);
5821 if (!xa_empty(per_store_ap)) {
5824 unsigned long l_idx;
5825 struct sdeb_store_info *sip;
5827 seq_puts(m, "\nhost list:\n");
5829 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5831 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5832 sdhp->shost->host_no, idx);
5835 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5836 sdeb_most_recent_idx);
5838 xa_for_each(per_store_ap, l_idx, sip) {
5839 niu = xa_get_mark(per_store_ap, l_idx,
5840 SDEB_XA_NOT_IN_USE);
5842 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5843 (niu ? " not_in_use" : ""));
5850 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5852 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5854 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5855 * of delay is jiffies.
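 * For example, "echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay" defers
 * each command's completion by about 2 jiffies.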
5857 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5862 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5864 if (sdebug_jdelay != jdelay) {
5866 struct sdebug_queue *sqp;
5868 block_unblock_all_queues(true);
5869 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5871 k = find_first_bit(sqp->in_use_bm,
5873 if (k != sdebug_max_queue) {
5874 res = -EBUSY; /* queued commands */
5879 sdebug_jdelay = jdelay;
5882 block_unblock_all_queues(false);
5888 static DRIVER_ATTR_RW(delay);
5890 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5892 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5894 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5895 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5896 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5901 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5902 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5904 if (sdebug_ndelay != ndelay) {
5906 struct sdebug_queue *sqp;
5908 block_unblock_all_queues(true);
5909 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5911 k = find_first_bit(sqp->in_use_bm,
5913 if (k != sdebug_max_queue) {
5914 res = -EBUSY; /* queued commands */
5919 sdebug_ndelay = ndelay;
5920 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5923 block_unblock_all_queues(false);
5929 static DRIVER_ATTR_RW(ndelay);
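/* Hedged usage sketch: ndelay is given in nanoseconds and must be less
 * than 10^9; a non-zero value overrides delay (sdebug_jdelay becomes
 * JDELAY_OVERRIDDEN) and writing 0 hands control back to delay:
 *   echo 500000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay   # 0.5 ms
 */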
5931 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5933 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5936 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5942 if (sscanf(buf, "%10s", work) == 1) {
5943 if (strncasecmp(work, "0x", 2) == 0) {
5944 if (kstrtoint(work + 2, 16, &opts) == 0)
5947 if (kstrtoint(work, 10, &opts) == 0)
5954 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5955 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5959 static DRIVER_ATTR_RW(opts);
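/* opts is parsed as hex when prefixed with "0x", else as decimal, so the
 * following writes are equivalent (assuming SDEBUG_OPT_NOISE is 0x1):
 *   echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */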
5961 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5963 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5965 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5970 /* Cannot change from or to TYPE_ZBC with sysfs */
5971 if (sdebug_ptype == TYPE_ZBC)
5974 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5982 static DRIVER_ATTR_RW(ptype);
5984 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5986 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5988 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5993 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5999 static DRIVER_ATTR_RW(dsense);
6001 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6003 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6005 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6010 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6011 bool want_store = (n == 0);
6012 struct sdebug_host_info *sdhp;
6015 sdebug_fake_rw = (sdebug_fake_rw > 0);
6016 if (sdebug_fake_rw == n)
6017 return count; /* not transitioning so do nothing */
6019 if (want_store) { /* 1 --> 0 transition, set up store */
6020 if (sdeb_first_idx < 0) {
6021 idx = sdebug_add_store();
6025 idx = sdeb_first_idx;
6026 xa_clear_mark(per_store_ap, idx,
6027 SDEB_XA_NOT_IN_USE);
6029 /* make all hosts use same store */
6030 list_for_each_entry(sdhp, &sdebug_host_list,
6032 if (sdhp->si_idx != idx) {
6033 xa_set_mark(per_store_ap, sdhp->si_idx,
6034 SDEB_XA_NOT_IN_USE);
6038 sdeb_most_recent_idx = idx;
6039 } else { /* 0 --> 1 transition is trigger for shrink */
6040 sdebug_erase_all_stores(true /* apart from first */);
6047 static DRIVER_ATTR_RW(fake_rw);
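/* Hedged sketch of the two transitions handled above:
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw  # 1->0: attach all
 *                                                        # hosts to one store
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw  # 0->1: erase all
 *                                                        # stores but the first
 */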
6049 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6051 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6053 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6058 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6059 sdebug_no_lun_0 = n;
6064 static DRIVER_ATTR_RW(no_lun_0);
6066 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6068 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6070 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6075 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6076 sdebug_num_tgts = n;
6077 sdebug_max_tgts_luns();
6082 static DRIVER_ATTR_RW(num_tgts);
6084 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6086 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6088 static DRIVER_ATTR_RO(dev_size_mb);
6090 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6092 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6095 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6100 if (kstrtobool(buf, &v))
6103 sdebug_per_host_store = v;
6106 static DRIVER_ATTR_RW(per_host_store);
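/* e.g. give each host added after this write its own backing store
 * (effective only while fake_rw is 0):
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/per_host_store
 */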
6108 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6110 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6112 static DRIVER_ATTR_RO(num_parts);
6114 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6116 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6118 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6124 if (sscanf(buf, "%10s", work) == 1) {
6125 if (strncasecmp(work, "0x", 2) == 0) {
6126 if (kstrtoint(work + 2, 16, &nth) == 0)
6127 goto every_nth_done;
6129 if (kstrtoint(work, 10, &nth) == 0)
6130 goto every_nth_done;
6136 sdebug_every_nth = nth;
6137 if (nth && !sdebug_statistics) {
6138 pr_info("every_nth needs statistics=1, set it\n");
6139 sdebug_statistics = true;
6144 static DRIVER_ATTR_RW(every_nth);
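/* e.g. act on every 100th command; statistics are switched on as a side
 * effect, as warned above:
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */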
6146 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6148 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6150 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6156 if (kstrtoint(buf, 0, &n))
6159 if (n > (int)SAM_LUN_AM_FLAT) {
6160 pr_warn("only LUN address methods 0 and 1 are supported\n");
6163 changed = ((int)sdebug_lun_am != n);
6165 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6166 struct sdebug_host_info *sdhp;
6167 struct sdebug_dev_info *dp;
6169 spin_lock(&sdebug_host_list_lock);
6170 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6171 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6172 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6175 spin_unlock(&sdebug_host_list_lock);
6181 static DRIVER_ATTR_RW(lun_format);
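/* e.g. select the flat LUN address method (SAM_LUN_AM_FLAT), raising
 * LUNS_CHANGED unit attentions as coded above:
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/lun_format
 */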
6183 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6185 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6187 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6193 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6195 pr_warn("max_luns can be no more than 256\n");
6198 changed = (sdebug_max_luns != n);
6199 sdebug_max_luns = n;
6200 sdebug_max_tgts_luns();
6201 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6202 struct sdebug_host_info *sdhp;
6203 struct sdebug_dev_info *dp;
6205 spin_lock(&sdebug_host_list_lock);
6206 list_for_each_entry(sdhp, &sdebug_host_list,
6208 list_for_each_entry(dp, &sdhp->dev_info_list,
6210 set_bit(SDEBUG_UA_LUNS_CHANGED,
6214 spin_unlock(&sdebug_host_list_lock);
6220 static DRIVER_ATTR_RW(max_luns);
6222 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6224 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6226 /* N.B. max_queue can be changed while there are queued commands. In-flight
6227 * commands beyond the new max_queue will be completed. */
6228 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6232 struct sdebug_queue *sqp;
6234 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6235 (n <= SDEBUG_CANQUEUE) &&
6236 (sdebug_host_max_queue == 0)) {
6237 block_unblock_all_queues(true);
6239 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6241 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6245 sdebug_max_queue = n;
6246 if (k == SDEBUG_CANQUEUE)
6247 atomic_set(&retired_max_queue, 0);
6249 atomic_set(&retired_max_queue, k + 1);
6251 atomic_set(&retired_max_queue, 0);
6252 block_unblock_all_queues(false);
6257 static DRIVER_ATTR_RW(max_queue);
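/* e.g. shrink the queue while commands may be in flight; those beyond the
 * new limit drain out via retired_max_queue as set above:
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/max_queue
 */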
6259 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6261 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6265 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6266 * in range [0, sdebug_host_max_queue), we can't change it.
6268 static DRIVER_ATTR_RO(host_max_queue);
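/* host_max_queue can still be set at module load time, which also selects
 * the shared host tag set (see sdebug_driver_probe() below), e.g.:
 *   modprobe scsi_debug host_max_queue=8
 */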
6270 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6272 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6274 static DRIVER_ATTR_RO(no_uld);
6276 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6278 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6280 static DRIVER_ATTR_RO(scsi_level);
6282 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6284 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6286 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6292 /* Ignore capacity change for ZBC drives for now */
6293 if (sdeb_zbc_in_use)
6296 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6297 changed = (sdebug_virtual_gb != n);
6298 sdebug_virtual_gb = n;
6299 sdebug_capacity = get_sdebug_capacity();
6301 struct sdebug_host_info *sdhp;
6302 struct sdebug_dev_info *dp;
6304 spin_lock(&sdebug_host_list_lock);
6305 list_for_each_entry(sdhp, &sdebug_host_list,
6307 list_for_each_entry(dp, &sdhp->dev_info_list,
6309 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6313 spin_unlock(&sdebug_host_list_lock);
6319 static DRIVER_ATTR_RW(virtual_gb);
6321 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6323 /* The absolute number of currently active hosts is what is shown. */
6324 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6327 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6332 struct sdeb_store_info *sip;
6333 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6336 if (sscanf(buf, "%d", &delta_hosts) != 1)
6338 if (delta_hosts > 0) {
6342 xa_for_each_marked(per_store_ap, idx, sip,
6343 SDEB_XA_NOT_IN_USE) {
6344 sdeb_most_recent_idx = (int)idx;
6348 if (found) /* re-use case */
6349 sdebug_add_host_helper((int)idx);
6351 sdebug_do_add_host(true);
6353 sdebug_do_add_host(false);
6355 } while (--delta_hosts);
6356 } else if (delta_hosts < 0) {
6358 sdebug_do_remove_host(false);
6359 } while (++delta_hosts);
6363 static DRIVER_ATTR_RW(add_host);
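/* add_host takes a signed delta of hosts, e.g.:
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host   # add two hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host  # remove newest
 */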
6365 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6367 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6369 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6374 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6375 sdebug_vpd_use_hostno = n;
6380 static DRIVER_ATTR_RW(vpd_use_hostno);
6382 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6384 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6386 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6391 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6393 sdebug_statistics = true;
6395 clear_queue_stats();
6396 sdebug_statistics = false;
6402 static DRIVER_ATTR_RW(statistics);
6404 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6406 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6408 static DRIVER_ATTR_RO(sector_size);
6410 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6412 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6414 static DRIVER_ATTR_RO(submit_queues);
6416 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6418 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6420 static DRIVER_ATTR_RO(dix);
6422 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6424 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6426 static DRIVER_ATTR_RO(dif);
6428 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6430 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6432 static DRIVER_ATTR_RO(guard);
6434 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6436 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6438 static DRIVER_ATTR_RO(ato);
6440 static ssize_t map_show(struct device_driver *ddp, char *buf)
6444 if (!scsi_debug_lbp())
6445 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6446 sdebug_store_sectors);
6448 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6449 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6452 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6453 (int)map_size, sip->map_storep);
6455 buf[count++] = '\n';
6460 static DRIVER_ATTR_RO(map);
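/* Illustrative output: the %*pbl format above renders the provisioning
 * bitmap as a bit list of mapped ranges (in map-index units), e.g.:
 *   cat /sys/bus/pseudo/drivers/scsi_debug/map
 *   0-1,96-127
 */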
6462 static ssize_t random_show(struct device_driver *ddp, char *buf)
6464 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6467 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6472 if (kstrtobool(buf, &v))
6478 static DRIVER_ATTR_RW(random);
6480 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6482 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6484 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6489 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6490 sdebug_removable = (n > 0);
6495 static DRIVER_ATTR_RW(removable);
6497 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6499 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6501 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6502 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6507 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6508 sdebug_host_lock = (n > 0);
6513 static DRIVER_ATTR_RW(host_lock);
6515 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6517 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6519 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6524 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6525 sdebug_strict = (n > 0);
6530 static DRIVER_ATTR_RW(strict);
6532 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6534 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6536 static DRIVER_ATTR_RO(uuid_ctl);
6538 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6540 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6542 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6547 ret = kstrtoint(buf, 0, &n);
6551 all_config_cdb_len();
6554 static DRIVER_ATTR_RW(cdb_len);
6556 static const char * const zbc_model_strs_a[] = {
6557 [BLK_ZONED_NONE] = "none",
6558 [BLK_ZONED_HA] = "host-aware",
6559 [BLK_ZONED_HM] = "host-managed",
6562 static const char * const zbc_model_strs_b[] = {
6563 [BLK_ZONED_NONE] = "no",
6564 [BLK_ZONED_HA] = "aware",
6565 [BLK_ZONED_HM] = "managed",
6568 static const char * const zbc_model_strs_c[] = {
6569 [BLK_ZONED_NONE] = "0",
6570 [BLK_ZONED_HA] = "1",
6571 [BLK_ZONED_HM] = "2",
6574 static int sdeb_zbc_model_str(const char *cp)
6576 int res = sysfs_match_string(zbc_model_strs_a, cp);
6579 res = sysfs_match_string(zbc_model_strs_b, cp);
6581 res = sysfs_match_string(zbc_model_strs_c, cp);
6589 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6591 return scnprintf(buf, PAGE_SIZE, "%s\n",
6592 zbc_model_strs_a[sdeb_zbc_model]);
6594 static DRIVER_ATTR_RO(zbc);
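/* Given the three string tables above, these module loads are equivalent
 * ways to ask for a host-managed model:
 *   modprobe scsi_debug zbc=host-managed
 *   modprobe scsi_debug zbc=managed
 *   modprobe scsi_debug zbc=2
 */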
6596 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6598 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6600 static DRIVER_ATTR_RO(tur_ms_to_ready);
6602 /* Note: The following array creates attribute files in the
6603 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6604 files (over those found in the /sys/module/scsi_debug/parameters
6605 directory) is that auxiliary actions can be triggered when an attribute
6606 is changed. For example see: add_host_store() above.
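Both locations accept writes for read-write parameters such as every_nth,
but only the driver attribute path runs every_nth_store() and its side
effects, e.g.:
    echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
    echo 100 > /sys/module/scsi_debug/parameters/every_nth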
6609 static struct attribute *sdebug_drv_attrs[] = {
6610 &driver_attr_delay.attr,
6611 &driver_attr_opts.attr,
6612 &driver_attr_ptype.attr,
6613 &driver_attr_dsense.attr,
6614 &driver_attr_fake_rw.attr,
6615 &driver_attr_host_max_queue.attr,
6616 &driver_attr_no_lun_0.attr,
6617 &driver_attr_num_tgts.attr,
6618 &driver_attr_dev_size_mb.attr,
6619 &driver_attr_num_parts.attr,
6620 &driver_attr_every_nth.attr,
6621 &driver_attr_lun_format.attr,
6622 &driver_attr_max_luns.attr,
6623 &driver_attr_max_queue.attr,
6624 &driver_attr_no_uld.attr,
6625 &driver_attr_scsi_level.attr,
6626 &driver_attr_virtual_gb.attr,
6627 &driver_attr_add_host.attr,
6628 &driver_attr_per_host_store.attr,
6629 &driver_attr_vpd_use_hostno.attr,
6630 &driver_attr_sector_size.attr,
6631 &driver_attr_statistics.attr,
6632 &driver_attr_submit_queues.attr,
6633 &driver_attr_dix.attr,
6634 &driver_attr_dif.attr,
6635 &driver_attr_guard.attr,
6636 &driver_attr_ato.attr,
6637 &driver_attr_map.attr,
6638 &driver_attr_random.attr,
6639 &driver_attr_removable.attr,
6640 &driver_attr_host_lock.attr,
6641 &driver_attr_ndelay.attr,
6642 &driver_attr_strict.attr,
6643 &driver_attr_uuid_ctl.attr,
6644 &driver_attr_cdb_len.attr,
6645 &driver_attr_tur_ms_to_ready.attr,
6646 &driver_attr_zbc.attr,
6649 ATTRIBUTE_GROUPS(sdebug_drv);
6651 static struct device *pseudo_primary;
6653 static int __init scsi_debug_init(void)
6655 bool want_store = (sdebug_fake_rw == 0);
6657 int k, ret, hosts_to_add;
6660 ramdisk_lck_a[0] = &atomic_rw;
6661 ramdisk_lck_a[1] = &atomic_rw2;
6662 atomic_set(&retired_max_queue, 0);
6664 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6665 pr_warn("ndelay must be less than 1 second, ignored\n");
6667 } else if (sdebug_ndelay > 0)
6668 sdebug_jdelay = JDELAY_OVERRIDDEN;
6670 switch (sdebug_sector_size) {
6677 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6681 switch (sdebug_dif) {
6682 case T10_PI_TYPE0_PROTECTION:
6684 case T10_PI_TYPE1_PROTECTION:
6685 case T10_PI_TYPE2_PROTECTION:
6686 case T10_PI_TYPE3_PROTECTION:
6687 have_dif_prot = true;
6691 pr_err("dif must be 0, 1, 2 or 3\n");
6695 if (sdebug_num_tgts < 0) {
6696 pr_err("num_tgts must be >= 0\n");
6700 if (sdebug_guard > 1) {
6701 pr_err("guard must be 0 or 1\n");
6705 if (sdebug_ato > 1) {
6706 pr_err("ato must be 0 or 1\n");
6710 if (sdebug_physblk_exp > 15) {
6711 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6715 sdebug_lun_am = sdebug_lun_am_i;
6716 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6717 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6718 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6721 if (sdebug_max_luns > 256) {
6722 if (sdebug_max_luns > 16384) {
6723 pr_warn("max_luns can be no more than 16384, use default\n");
6724 sdebug_max_luns = DEF_MAX_LUNS;
6726 sdebug_lun_am = SAM_LUN_AM_FLAT;
6729 if (sdebug_lowest_aligned > 0x3fff) {
6730 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6734 if (submit_queues < 1) {
6735 pr_err("submit_queues must be 1 or more\n");
6739 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6740 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6744 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6745 (sdebug_host_max_queue < 0)) {
6746 pr_err("host_max_queue must be in range [0 %d]\n",
6751 if (sdebug_host_max_queue &&
6752 (sdebug_max_queue != sdebug_host_max_queue)) {
6753 sdebug_max_queue = sdebug_host_max_queue;
6754 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6758 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6760 if (sdebug_q_arr == NULL)
6762 for (k = 0; k < submit_queues; ++k)
6763 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6766 * check for host managed zoned block device specified with
6767 * ptype=0x14 or zbc=XXX.
6769 if (sdebug_ptype == TYPE_ZBC) {
6770 sdeb_zbc_model = BLK_ZONED_HM;
6771 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6772 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6778 switch (sdeb_zbc_model) {
6779 case BLK_ZONED_NONE:
6781 sdebug_ptype = TYPE_DISK;
6784 sdebug_ptype = TYPE_ZBC;
6787 pr_err("Invalid ZBC model\n");
6792 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6793 sdeb_zbc_in_use = true;
6794 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6795 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6798 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6799 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6800 if (sdebug_dev_size_mb < 1)
6801 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6802 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6803 sdebug_store_sectors = sz / sdebug_sector_size;
6804 sdebug_capacity = get_sdebug_capacity();
6806 /* play around with geometry, don't waste too much on track 0 */
6808 sdebug_sectors_per = 32;
6809 if (sdebug_dev_size_mb >= 256)
6811 else if (sdebug_dev_size_mb >= 16)
6813 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6814 (sdebug_sectors_per * sdebug_heads);
6815 if (sdebug_cylinders_per >= 1024) {
6816 /* other LLDs do this; implies >= 1GB ram disk ... */
6818 sdebug_sectors_per = 63;
6819 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6820 (sdebug_sectors_per * sdebug_heads);
6822 if (scsi_debug_lbp()) {
6823 sdebug_unmap_max_blocks =
6824 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6826 sdebug_unmap_max_desc =
6827 clamp(sdebug_unmap_max_desc, 0U, 256U);
6829 sdebug_unmap_granularity =
6830 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6832 if (sdebug_unmap_alignment &&
6833 sdebug_unmap_granularity <=
6834 sdebug_unmap_alignment) {
6835 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6840 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6842 idx = sdebug_add_store();
6849 pseudo_primary = root_device_register("pseudo_0");
6850 if (IS_ERR(pseudo_primary)) {
6851 pr_warn("root_device_register() error\n");
6852 ret = PTR_ERR(pseudo_primary);
6855 ret = bus_register(&pseudo_lld_bus);
6857 pr_warn("bus_register error: %d\n", ret);
6860 ret = driver_register(&sdebug_driverfs_driver);
6862 pr_warn("driver_register error: %d\n", ret);
6866 hosts_to_add = sdebug_add_host;
6867 sdebug_add_host = 0;
6869 for (k = 0; k < hosts_to_add; k++) {
6870 if (want_store && k == 0) {
6871 ret = sdebug_add_host_helper(idx);
6873 pr_err("add_host_helper k=%d, error=%d\n",
6878 ret = sdebug_do_add_host(want_store &&
6879 sdebug_per_host_store);
6881 pr_err("add_host k=%d error=%d\n", k, -ret);
6887 pr_info("built %d host(s)\n", sdebug_num_hosts);
6892 bus_unregister(&pseudo_lld_bus);
6894 root_device_unregister(pseudo_primary);
6896 sdebug_erase_store(idx, NULL);
6898 kfree(sdebug_q_arr);
6902 static void __exit scsi_debug_exit(void)
6904 int k = sdebug_num_hosts;
6908 sdebug_do_remove_host(true);
6910 driver_unregister(&sdebug_driverfs_driver);
6911 bus_unregister(&pseudo_lld_bus);
6912 root_device_unregister(pseudo_primary);
6914 sdebug_erase_all_stores(false);
6915 xa_destroy(per_store_ap);
6916 kfree(sdebug_q_arr);
6919 device_initcall(scsi_debug_init);
6920 module_exit(scsi_debug_exit);
6922 static void sdebug_release_adapter(struct device *dev)
6924 struct sdebug_host_info *sdbg_host;
6926 sdbg_host = to_sdebug_host(dev);
6930 /* idx must be valid; if sip is NULL then it will be obtained using idx */
6931 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6936 if (xa_empty(per_store_ap))
6938 sip = xa_load(per_store_ap, idx);
6942 vfree(sip->map_storep);
6943 vfree(sip->dif_storep);
6945 xa_erase(per_store_ap, idx);
6949 /* Assume apart_from_first==false only in shutdown case. */
6950 static void sdebug_erase_all_stores(bool apart_from_first)
6953 struct sdeb_store_info *sip = NULL;
6955 xa_for_each(per_store_ap, idx, sip) {
6956 if (apart_from_first)
6957 apart_from_first = false;
6959 sdebug_erase_store(idx, sip);
6961 if (apart_from_first)
6962 sdeb_most_recent_idx = sdeb_first_idx;
6966 * Returns the new store's xarray element index (idx) if >= 0, else a
6967 * negated errno. Limit the number of stores to 65536.
6969 static int sdebug_add_store(void)
6973 unsigned long iflags;
6974 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6975 struct sdeb_store_info *sip = NULL;
6976 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6978 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6982 xa_lock_irqsave(per_store_ap, iflags);
6983 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6984 if (unlikely(res < 0)) {
6985 xa_unlock_irqrestore(per_store_ap, iflags);
6987 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6990 sdeb_most_recent_idx = n_idx;
6991 if (sdeb_first_idx < 0)
6992 sdeb_first_idx = n_idx;
6993 xa_unlock_irqrestore(per_store_ap, iflags);
6996 sip->storep = vzalloc(sz);
6998 pr_err("user data oom\n");
7001 if (sdebug_num_parts > 0)
7002 sdebug_build_parts(sip->storep, sz);
7004 /* DIF/DIX: what T10 calls Protection Information (PI) */
7008 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7009 sip->dif_storep = vmalloc(dif_size);
7011 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7014 if (!sip->dif_storep) {
7015 pr_err("DIX oom\n");
7018 memset(sip->dif_storep, 0xff, dif_size);
7020 /* Logical Block Provisioning */
7021 if (scsi_debug_lbp()) {
7022 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7023 sip->map_storep = vmalloc(array_size(sizeof(long),
7024 BITS_TO_LONGS(map_size)));
7026 pr_info("%lu provisioning blocks\n", map_size);
7028 if (!sip->map_storep) {
7029 pr_err("LBP map oom\n");
7033 bitmap_zero(sip->map_storep, map_size);
7035 /* Map first 1KB for partition table */
7036 if (sdebug_num_parts)
7037 map_region(sip, 0, 2);
7040 rwlock_init(&sip->macc_lck);
7043 sdebug_erase_store((int)n_idx, sip);
7044 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7048 static int sdebug_add_host_helper(int per_host_idx)
7050 int k, devs_per_host, idx;
7051 int error = -ENOMEM;
7052 struct sdebug_host_info *sdbg_host;
7053 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7055 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7058 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7059 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7060 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7061 sdbg_host->si_idx = idx;
7063 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7065 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7066 for (k = 0; k < devs_per_host; k++) {
7067 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7072 spin_lock(&sdebug_host_list_lock);
7073 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7074 spin_unlock(&sdebug_host_list_lock);
7076 sdbg_host->dev.bus = &pseudo_lld_bus;
7077 sdbg_host->dev.parent = pseudo_primary;
7078 sdbg_host->dev.release = &sdebug_release_adapter;
7079 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7081 error = device_register(&sdbg_host->dev);
7089 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7091 list_del(&sdbg_devinfo->dev_list);
7092 kfree(sdbg_devinfo->zstate);
7093 kfree(sdbg_devinfo);
7096 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7100 static int sdebug_do_add_host(bool mk_new_store)
7102 int ph_idx = sdeb_most_recent_idx;
7105 ph_idx = sdebug_add_store();
7109 return sdebug_add_host_helper(ph_idx);
7112 static void sdebug_do_remove_host(bool the_end)
7115 struct sdebug_host_info *sdbg_host = NULL;
7116 struct sdebug_host_info *sdbg_host2;
7118 spin_lock(&sdebug_host_list_lock);
7119 if (!list_empty(&sdebug_host_list)) {
7120 sdbg_host = list_entry(sdebug_host_list.prev,
7121 struct sdebug_host_info, host_list);
7122 idx = sdbg_host->si_idx;
7124 if (!the_end && idx >= 0) {
7127 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7128 if (sdbg_host2 == sdbg_host)
7130 if (idx == sdbg_host2->si_idx) {
7136 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7137 if (idx == sdeb_most_recent_idx)
7138 --sdeb_most_recent_idx;
7142 list_del(&sdbg_host->host_list);
7143 spin_unlock(&sdebug_host_list_lock);
7148 device_unregister(&sdbg_host->dev);
7152 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7155 struct sdebug_dev_info *devip;
7157 block_unblock_all_queues(true);
7158 devip = (struct sdebug_dev_info *)sdev->hostdata;
7159 if (NULL == devip) {
7160 block_unblock_all_queues(false);
7163 num_in_q = atomic_read(&devip->num_in_q);
7167 /* allow qdepth to exceed max host qc_arr elements for testing */
7168 if (qdepth > SDEBUG_CANQUEUE + 10)
7169 qdepth = SDEBUG_CANQUEUE + 10;
7170 scsi_change_queue_depth(sdev, qdepth);
7172 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7173 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7174 __func__, qdepth, num_in_q);
7176 block_unblock_all_queues(false);
7177 return sdev->queue_depth;
7180 static bool fake_timeout(struct scsi_cmnd *scp)
7182 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7183 if (sdebug_every_nth < -1)
7184 sdebug_every_nth = -1;
7185 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7186 return true; /* ignore command causing timeout */
7187 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7188 scsi_medium_access_command(scp))
7189 return true; /* time out reads and writes */
7194 /* Response to TUR or media access command when device stopped */
7195 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7199 ktime_t now_ts = ktime_get_boottime();
7200 struct scsi_device *sdp = scp->device;
7202 stopped_state = atomic_read(&devip->stopped);
7203 if (stopped_state == 2) {
7204 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7205 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7206 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7207 /* tur_ms_to_ready timer has expired */
7208 atomic_set(&devip->stopped, 0);
7212 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7214 sdev_printk(KERN_INFO, sdp,
7215 "%s: Not ready: in process of becoming ready\n", my_name);
7216 if (scp->cmnd[0] == TEST_UNIT_READY) {
7217 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7219 if (diff_ns <= tur_nanosecs_to_ready)
7220 diff_ns = tur_nanosecs_to_ready - diff_ns;
7222 diff_ns = tur_nanosecs_to_ready;
7223 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7224 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7225 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7227 return check_condition_result;
7230 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7232 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7234 return check_condition_result;
7237 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7238 struct scsi_cmnd *scp)
7241 struct scsi_device *sdp = scp->device;
7242 const struct opcode_info_t *oip;
7243 const struct opcode_info_t *r_oip;
7244 struct sdebug_dev_info *devip;
7245 u8 *cmd = scp->cmnd;
7246 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7247 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7250 u64 lun_index = sdp->lun & 0x3FFF;
7257 scsi_set_resid(scp, 0);
7258 if (sdebug_statistics) {
7259 atomic_inc(&sdebug_cmnd_count);
7260 inject_now = inject_on_this_cmd();
7264 if (unlikely(sdebug_verbose &&
7265 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7270 sb = (int)sizeof(b);
7272 strcpy(b, "too long, over 32 bytes");
7274 for (k = 0, n = 0; k < len && n < sb; ++k)
7275 n += scnprintf(b + n, sb - n, "%02x ",
7278 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7279 blk_mq_unique_tag(scp->request), b);
7281 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7282 return SCSI_MLQUEUE_HOST_BUSY;
7283 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7284 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7287 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7288 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7289 devip = (struct sdebug_dev_info *)sdp->hostdata;
7290 if (unlikely(!devip)) {
7291 devip = find_build_dev_info(sdp);
7295 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7296 atomic_set(&sdeb_inject_pending, 1);
7298 na = oip->num_attached;
7300 if (na) { /* multiple commands with this opcode */
7302 if (FF_SA & r_oip->flags) {
7303 if (F_SA_LOW & oip->flags)
7306 sa = get_unaligned_be16(cmd + 8);
7307 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7308 if (opcode == oip->opcode && sa == oip->sa)
7311 } else { /* since no service action only check opcode */
7312 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7313 if (opcode == oip->opcode)
7318 if (F_SA_LOW & r_oip->flags)
7319 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7320 else if (F_SA_HIGH & r_oip->flags)
7321 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7323 mk_sense_invalid_opcode(scp);
7326 } /* else (when na==0) we assume the oip is a match */
7328 if (unlikely(F_INV_OP & flags)) {
7329 mk_sense_invalid_opcode(scp);
7332 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7334 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7335 my_name, opcode, " supported for wlun");
7336 mk_sense_invalid_opcode(scp);
7339 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7343 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7344 rem = ~oip->len_mask[k] & cmd[k];
7346 for (j = 7; j >= 0; --j, rem <<= 1) {
7350 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7355 if (unlikely(!(F_SKIP_UA & flags) &&
7356 find_first_bit(devip->uas_bm,
7357 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7358 errsts = make_ua(scp, devip);
7362 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7363 atomic_read(&devip->stopped))) {
7364 errsts = resp_not_ready(scp, devip);
7368 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7370 if (unlikely(sdebug_every_nth)) {
7371 if (fake_timeout(scp))
7372 return 0; /* ignore command: make trouble */
7374 if (likely(oip->pfp))
7375 pfp = oip->pfp; /* calls a resp_* function */
7377 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7380 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7381 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7382 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7383 sdebug_ndelay > 10000)) {
7385 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7386 * for Start Stop Unit (SSU) want at least 1 second delay and
7387 * if sdebug_jdelay>1 want a long delay of that many seconds.
7388 * For Synchronize Cache want 1/20 of SSU's delay.
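* Worked example, assuming HZ=250 and USER_HZ=100: with sdebug_jdelay==3,
* an SSU gets mult_frac(300, 250, 100) = 750 jiffies (~3 s) while a
* Synchronize Cache (denom==20) gets mult_frac(300, 250, 2000) = 37
* jiffies (~0.15 s).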
7390 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7391 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7393 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7394 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7396 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7399 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7401 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7404 static struct scsi_host_template sdebug_driver_template = {
7405 .show_info = scsi_debug_show_info,
7406 .write_info = scsi_debug_write_info,
7407 .proc_name = sdebug_proc_name,
7408 .name = "SCSI DEBUG",
7409 .info = scsi_debug_info,
7410 .slave_alloc = scsi_debug_slave_alloc,
7411 .slave_configure = scsi_debug_slave_configure,
7412 .slave_destroy = scsi_debug_slave_destroy,
7413 .ioctl = scsi_debug_ioctl,
7414 .queuecommand = scsi_debug_queuecommand,
7415 .change_queue_depth = sdebug_change_qdepth,
7416 .eh_abort_handler = scsi_debug_abort,
7417 .eh_device_reset_handler = scsi_debug_device_reset,
7418 .eh_target_reset_handler = scsi_debug_target_reset,
7419 .eh_bus_reset_handler = scsi_debug_bus_reset,
7420 .eh_host_reset_handler = scsi_debug_host_reset,
7421 .can_queue = SDEBUG_CANQUEUE,
7423 .sg_tablesize = SG_MAX_SEGMENTS,
7424 .cmd_per_lun = DEF_CMD_PER_LUN,
7426 .max_segment_size = -1U,
7427 .module = THIS_MODULE,
7428 .track_queue_depth = 1,
7431 static int sdebug_driver_probe(struct device *dev)
7434 struct sdebug_host_info *sdbg_host;
7435 struct Scsi_Host *hpnt;
7438 sdbg_host = to_sdebug_host(dev);
7440 sdebug_driver_template.can_queue = sdebug_max_queue;
7441 if (!sdebug_clustering)
7442 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7444 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7446 pr_err("scsi_host_alloc failed\n");
7450 if (submit_queues > nr_cpu_ids) {
7451 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7452 my_name, submit_queues, nr_cpu_ids);
7453 submit_queues = nr_cpu_ids;
7456 * Decide whether to tell scsi subsystem that we want mq. The
7457 * following should give the same answer for each host.
7459 hpnt->nr_hw_queues = submit_queues;
7460 if (sdebug_host_max_queue)
7461 hpnt->host_tagset = 1;
7463 sdbg_host->shost = hpnt;
7464 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7465 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7466 hpnt->max_id = sdebug_num_tgts + 1;
7468 hpnt->max_id = sdebug_num_tgts;
7469 /* = sdebug_max_luns; */
7470 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7474 switch (sdebug_dif) {
7476 case T10_PI_TYPE1_PROTECTION:
7477 hprot = SHOST_DIF_TYPE1_PROTECTION;
7479 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7482 case T10_PI_TYPE2_PROTECTION:
7483 hprot = SHOST_DIF_TYPE2_PROTECTION;
7485 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7488 case T10_PI_TYPE3_PROTECTION:
7489 hprot = SHOST_DIF_TYPE3_PROTECTION;
7491 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7496 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7500 scsi_host_set_prot(hpnt, hprot);
7502 if (have_dif_prot || sdebug_dix)
7503 pr_info("host protection%s%s%s%s%s%s%s\n",
7504 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7505 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7506 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7507 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7508 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7509 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7510 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7512 if (sdebug_guard == 1)
7513 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7515 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7517 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7518 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7519 if (sdebug_every_nth) /* need stats counters for every_nth */
7520 sdebug_statistics = true;
7521 error = scsi_add_host(hpnt, &sdbg_host->dev);
7523 pr_err("scsi_add_host failed\n");
7525 scsi_host_put(hpnt);
7527 scsi_scan_host(hpnt);
7533 static int sdebug_driver_remove(struct device *dev)
7535 struct sdebug_host_info *sdbg_host;
7536 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7538 sdbg_host = to_sdebug_host(dev);
7541 pr_err("Unable to locate host info\n");
7545 scsi_remove_host(sdbg_host->shost);
7547 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7549 list_del(&sdbg_devinfo->dev_list);
7550 kfree(sdbg_devinfo->zstate);
7551 kfree(sdbg_devinfo);
7554 scsi_host_put(sdbg_host->shost);
7558 static int pseudo_lld_bus_match(struct device *dev,
7559 struct device_driver *dev_driver)
7564 static struct bus_type pseudo_lld_bus = {
7566 .match = pseudo_lld_bus_match,
7567 .probe = sdebug_driver_probe,
7568 .remove = sdebug_driver_remove,
7569 .drv_groups = sdebug_drv_groups,