1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
10 * Copyright (C) 2001 - 2020 Douglas Gilbert
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
60 #include "scsi_logging.h"
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190" /* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";
66 #define MY_NAME "scsi_debug"
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST 1
108 #define DEF_NUM_TGTS 1
109 #define DEF_MAX_LUNS 1
110 /* With these defaults, this driver will make 1 host with 1 target
111 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
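/*
 * Illustrative usage (a sketch; it assumes the module parameter names
 * add_host, num_tgts and max_luns that correspond to the defaults above):
 *   modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4
 * would create 2 hosts x 2 targets x 4 LUNs = 16 simulated devices.
 */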
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT 0
117 #define DEF_DEV_SIZE_MB 8
118 #define DEF_ZBC_DEV_SIZE_MB 128
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE 0
123 #define DEF_EVERY_NTH 0
124 #define DEF_FAKE_RW 0
126 #define DEF_HOST_LOCK 0
129 #define DEF_LBPWS10 0
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0 0
134 #define DEF_NUM_PARTS 0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB 0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_TUR_MS_TO_READY 0
155 #define DEF_UUID_CTL 0
156 #define JDELAY_OVERRIDDEN -9999
158 /* Default parameters for ZBC drives */
159 #define DEF_ZBC_ZONE_SIZE_MB 128
160 #define DEF_ZBC_MAX_OPEN_ZONES 8
161 #define DEF_ZBC_NR_CONV_ZONES 1
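/*
 * Illustrative ZBC invocation (a sketch; parameter names such as zbc=,
 * zone_size_mb=, zone_nr_conv= and zone_max_open= are assumed to match
 * the sdeb_zbc_* variables declared later in this file):
 *   modprobe scsi_debug zbc=managed dev_size_mb=256 zone_size_mb=16
 * i.e. a host-managed disk with 256/16 = 16 zones, the first
 * DEF_ZBC_NR_CONV_ZONES of them conventional.
 */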
163 #define SDEBUG_LUN_0_VAL 0
165 /* bit mask values for sdebug_opts */
166 #define SDEBUG_OPT_NOISE 1
167 #define SDEBUG_OPT_MEDIUM_ERR 2
168 #define SDEBUG_OPT_TIMEOUT 4
169 #define SDEBUG_OPT_RECOVERED_ERR 8
170 #define SDEBUG_OPT_TRANSPORT_ERR 16
171 #define SDEBUG_OPT_DIF_ERR 32
172 #define SDEBUG_OPT_DIX_ERR 64
173 #define SDEBUG_OPT_MAC_TIMEOUT 128
174 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
175 #define SDEBUG_OPT_Q_NOISE 0x200
176 #define SDEBUG_OPT_ALL_TSF 0x400
177 #define SDEBUG_OPT_RARE_TSF 0x800
178 #define SDEBUG_OPT_N_WCE 0x1000
179 #define SDEBUG_OPT_RESET_NOISE 0x2000
180 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
181 #define SDEBUG_OPT_HOST_BUSY 0x8000
182 #define SDEBUG_OPT_CMD_ABORT 0x10000
183 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
184 SDEBUG_OPT_RESET_NOISE)
185 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
186 SDEBUG_OPT_TRANSPORT_ERR | \
187 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
188 SDEBUG_OPT_SHORT_TRANSFER | \
189 SDEBUG_OPT_HOST_BUSY | \
190 SDEBUG_OPT_CMD_ABORT)
191 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
192 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
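/*
 * The bits above are OR-ed together into the "opts" module parameter
 * (also writable at run time through sysfs). For example opts=0x11
 * combines SDEBUG_OPT_NOISE with SDEBUG_OPT_TRANSPORT_ERR, giving
 * verbose logging plus injected transport errors.
 */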
194 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
195 * priority order. In the subset implemented here lower numbers have higher
196 * priority. The UA numbers should be a sequence starting from 0 with
197 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
198 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
199 #define SDEBUG_UA_BUS_RESET 1
200 #define SDEBUG_UA_MODE_CHANGED 2
201 #define SDEBUG_UA_CAPACITY_CHANGED 3
202 #define SDEBUG_UA_LUNS_CHANGED 4
203 #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
204 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
205 #define SDEBUG_NUM_UAS 7
207 /* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
208 * is simulated at this sector on read commands: */
209 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
210 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
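/*
 * Example: with SDEBUG_OPT_MEDIUM_ERR set in opts, a READ touching any
 * LBA in [0x1234, 0x1234 + OPT_MEDIUM_ERR_NUM) is failed with an
 * UNRECOVERED_READ_ERR sense, which exercises the host's retry and error
 * handling paths without real media damage.
 */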
212 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
213 * (for response) per submit queue at one time. Can be reduced by max_queue
214 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
215 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
216 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
217 * but cannot exceed SDEBUG_CANQUEUE .
219 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is the number of bits in a long */
220 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
221 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
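/* E.g. on a 64 bit build SDEBUG_CANQUEUE = 3 * 64 = 192 queued commands per submit queue. */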
223 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
224 #define F_D_IN 1 /* Data-in command (e.g. READ) */
225 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
226 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
228 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
229 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
230 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
231 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
232 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
233 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
234 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
235 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
236 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
237 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
239 /* Useful combinations of the above flags */
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
245 #define SDEBUG_MAX_PARTS 4
247 #define SDEBUG_MAX_CMD_LEN 32
249 #define SDEB_XA_NOT_IN_USE XA_MARK_1
251 /* Zone types (zbcr05 table 25) */
253 ZBC_ZONE_TYPE_CNV = 0x1,
254 ZBC_ZONE_TYPE_SWR = 0x2,
255 ZBC_ZONE_TYPE_SWP = 0x3,
258 /* enumeration names taken from table 26, zbcr05 */
260 ZBC_NOT_WRITE_POINTER = 0x0,
262 ZC2_IMPLICIT_OPEN = 0x2,
263 ZC3_EXPLICIT_OPEN = 0x3,
270 struct sdeb_zone_state { /* ZBC: per zone state */
271 enum sdebug_z_type z_type;
272 enum sdebug_z_cond z_cond;
273 bool z_non_seq_resource;
279 struct sdebug_dev_info {
280 struct list_head dev_list;
281 unsigned int channel;
285 struct sdebug_host_info *sdbg_host;
286 unsigned long uas_bm[1];
288 atomic_t stopped; /* 1: by SSU, 2: device start */
291 /* For ZBC devices */
292 enum blk_zoned_model zmodel;
294 unsigned int zsize_shift;
295 unsigned int nr_zones;
296 unsigned int nr_conv_zones;
297 unsigned int nr_imp_open;
298 unsigned int nr_exp_open;
299 unsigned int nr_closed;
300 unsigned int max_open;
301 ktime_t create_ts; /* time since bootup that this device was created */
302 struct sdeb_zone_state *zstate;
305 struct sdebug_host_info {
306 struct list_head host_list;
307 int si_idx; /* sdeb_store_info (per host) xarray index */
308 struct Scsi_Host *shost;
310 struct list_head dev_info_list;
313 /* There is an xarray of pointers to this struct's objects, one per host */
314 struct sdeb_store_info {
315 rwlock_t macc_lck; /* for atomic media access on this store */
316 u8 *storep; /* user data storage (ram) */
317 struct t10_pi_tuple *dif_storep; /* protection info */
318 void *map_storep; /* provisioning map */
321 #define to_sdebug_host(d) \
322 container_of(d, struct sdebug_host_info, dev)
324 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
325 SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
327 struct sdebug_defer {
329 struct execute_work ew;
330 ktime_t cmpl_ts;/* time since boot to complete this cmd */
331 int sqa_idx; /* index of sdebug_queue array */
332 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
333 int hc_idx; /* hostwide tag index */
338 bool aborted; /* true when blk_abort_request() already called */
339 enum sdeb_defer_type defer_t;
342 struct sdebug_queued_cmd {
343 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
344 * instance indicates this slot is in use.
346 struct sdebug_defer *sd_dp;
347 struct scsi_cmnd *a_cmnd;
350 struct sdebug_queue {
351 struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
352 unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
354 atomic_t blocked; /* to temporarily stop more being queued */
357 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
358 static atomic_t sdebug_completions; /* count of deferred completions */
359 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
360 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
361 static atomic_t sdeb_inject_pending;
362 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
364 struct opcode_info_t {
365 u8 num_attached; /* 0 if this entry is a leaf (no attached array); 0xff */
366 /* marks the terminating element */
367 u8 opcode; /* if num_attached > 0, preferred */
368 u16 sa; /* service action */
369 u32 flags; /* OR-ed set of SDEB_F_* */
370 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
371 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
372 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
373 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
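/*
 * Illustration of len_mask: the INQUIRY leaf in opcode_info_arr[] below is
 *   {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
 *    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...} }
 * so len_mask[0] = 6 gives the cdb length and the following bytes are
 * per-byte masks of the cdb bits this driver accepts; with the "strict"
 * parameter set, cdb bits outside the mask draw an Illegal Request sense.
 */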
376 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
377 enum sdeb_opcode_index {
378 SDEB_I_INVALID_OPCODE = 0,
380 SDEB_I_REPORT_LUNS = 2,
381 SDEB_I_REQUEST_SENSE = 3,
382 SDEB_I_TEST_UNIT_READY = 4,
383 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
384 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
385 SDEB_I_LOG_SENSE = 7,
386 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
387 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
388 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
389 SDEB_I_START_STOP = 11,
390 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
391 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
392 SDEB_I_MAINT_IN = 14,
393 SDEB_I_MAINT_OUT = 15,
394 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
395 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
396 SDEB_I_RESERVE = 18, /* 6, 10 */
397 SDEB_I_RELEASE = 19, /* 6, 10 */
398 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
399 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
400 SDEB_I_ATA_PT = 22, /* 12, 16 */
401 SDEB_I_SEND_DIAG = 23,
403 SDEB_I_WRITE_BUFFER = 25,
404 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
405 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
406 SDEB_I_COMP_WRITE = 28,
407 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
408 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
409 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
410 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
414 static const unsigned char opcode_ind_arr[256] = {
415 /* 0x0; 0x0->0x1f: 6 byte cdbs */
416 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
418 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
419 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
421 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
422 SDEB_I_ALLOW_REMOVAL, 0,
423 /* 0x20; 0x20->0x3f: 10 byte cdbs */
424 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
425 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
426 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
427 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
428 /* 0x40; 0x40->0x5f: 10 byte cdbs */
429 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
430 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
431 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
433 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
434 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
435 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
436 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
437 0, SDEB_I_VARIABLE_LEN,
438 /* 0x80; 0x80->0x9f: 16 byte cdbs */
439 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
440 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
441 0, 0, 0, SDEB_I_VERIFY,
442 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
443 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
444 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
445 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
446 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
447 SDEB_I_MAINT_OUT, 0, 0, 0,
448 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
449 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
450 0, 0, 0, 0, 0, 0, 0, 0,
451 0, 0, 0, 0, 0, 0, 0, 0,
452 /* 0xc0; 0xc0->0xff: vendor specific */
453 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
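/*
 * Lookup sketch: cdb byte 0 indexes opcode_ind_arr[] to obtain an
 * SDEB_I_* value which then indexes opcode_info_arr[] below. E.g.
 * WRITE(10) (opcode 0x2a) maps to SDEB_I_WRITE, whose preferred leaf is
 * WRITE(16) (0x8a); the WRITE(10) variant is found in the attached
 * write_iarr[] overflow array.
 */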
460 * The following "response" functions return the SCSI mid-level's 4 byte
461 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
462 * command completion, they can OR their return value with
463 * SDEG_RES_IMMED_MASK.
465 #define SDEG_RES_IMMED_MASK 0x40000000
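/*
 * E.g. a handler that honours an IMMED bit may "return 0 | SDEG_RES_IMMED_MASK"
 * so that the scheduling code can strip the flag and complete the command
 * with little or no delay (a sketch of the convention described above).
 */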
467 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int sdebug_do_add_host(bool mk_new_store);
498 static int sdebug_add_host_helper(int per_host_idx);
499 static void sdebug_do_remove_host(bool the_end);
500 static int sdebug_add_store(void);
501 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
502 static void sdebug_erase_all_stores(bool apart_from_first);
505 * The following are overflow arrays for cdbs that "hit" the same index in
506 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
507 * should be placed in opcode_info_arr[]; the others should be placed here.
509 static const struct opcode_info_t msense_iarr[] = {
510 {0, 0x1a, 0, F_D_IN, NULL, NULL,
511 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
514 static const struct opcode_info_t mselect_iarr[] = {
515 {0, 0x15, 0, F_D_OUT, NULL, NULL,
516 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
519 static const struct opcode_info_t read_iarr[] = {
520 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
521 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
523 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
524 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
526 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
530 static const struct opcode_info_t write_iarr[] = {
531 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
532 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
534 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
535 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
537 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
538 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
539 0xbf, 0xc7, 0, 0, 0, 0} },
542 static const struct opcode_info_t verify_iarr[] = {
543 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
544 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
548 static const struct opcode_info_t sa_in_16_iarr[] = {
549 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
550 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
551 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
554 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
555 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
556 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
557 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
558 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
559 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
560 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
563 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
564 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
565 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
566 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
567 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
568 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
569 0, 0} }, /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
572 static const struct opcode_info_t write_same_iarr[] = {
573 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
574 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
575 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
578 static const struct opcode_info_t reserve_iarr[] = {
579 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
580 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
583 static const struct opcode_info_t release_iarr[] = {
584 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
585 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
588 static const struct opcode_info_t sync_cache_iarr[] = {
589 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
590 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
591 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
594 static const struct opcode_info_t pre_fetch_iarr[] = {
595 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
596 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
600 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
601 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
602 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
604 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
605 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
607 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
608 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
612 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
613 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
614 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
619 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
620 * plus the terminating elements for logic that scans this table such as
621 * REPORT SUPPORTED OPERATION CODES. */
622 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
624 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
625 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
626 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
627 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
628 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
629 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
630 0, 0} }, /* REPORT LUNS */
631 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
632 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
633 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
634 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
636 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
637 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
638 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
639 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
640 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
641 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
642 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
643 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
645 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
646 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
648 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
649 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
650 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
652 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
653 resp_write_dt0, write_iarr, /* WRITE(16) */
654 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
655 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
656 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
657 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
658 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
659 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
660 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
662 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
663 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
664 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
665 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
666 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
667 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
668 0xff, 0, 0xc7, 0, 0, 0, 0} },
670 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
671 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
672 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
673 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
674 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
675 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
676 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
677 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
678 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
680 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
681 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
682 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
684 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
685 NULL, release_iarr, /* RELEASE(10) <no response function> */
686 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
690 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
691 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
692 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
693 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
694 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
695 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
696 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
698 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
700 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
701 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
702 0, 0, 0, 0} }, /* WRITE_BUFFER */
703 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
704 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
705 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
707 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
708 resp_sync_cache, sync_cache_iarr,
709 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
710 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
711 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
712 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
713 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
714 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
715 resp_pre_fetch, pre_fetch_iarr,
716 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
717 0, 0, 0, 0} }, /* PRE-FETCH (10) */
720 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
721 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
722 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
723 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
724 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
725 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
726 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
727 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
729 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
730 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
733 static int sdebug_num_hosts;
734 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
735 static int sdebug_ato = DEF_ATO;
736 static int sdebug_cdb_len = DEF_CDB_LEN;
737 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
738 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
739 static int sdebug_dif = DEF_DIF;
740 static int sdebug_dix = DEF_DIX;
741 static int sdebug_dsense = DEF_D_SENSE;
742 static int sdebug_every_nth = DEF_EVERY_NTH;
743 static int sdebug_fake_rw = DEF_FAKE_RW;
744 static unsigned int sdebug_guard = DEF_GUARD;
745 static int sdebug_host_max_queue; /* per host */
746 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
747 static int sdebug_max_luns = DEF_MAX_LUNS;
748 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
749 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
750 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
751 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
752 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
753 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
754 static int sdebug_no_uld;
755 static int sdebug_num_parts = DEF_NUM_PARTS;
756 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
757 static int sdebug_opt_blks = DEF_OPT_BLKS;
758 static int sdebug_opts = DEF_OPTS;
759 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
760 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
761 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
762 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
763 static int sdebug_sector_size = DEF_SECTOR_SIZE;
764 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
765 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
766 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
767 static unsigned int sdebug_lbpu = DEF_LBPU;
768 static unsigned int sdebug_lbpws = DEF_LBPWS;
769 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
770 static unsigned int sdebug_lbprz = DEF_LBPRZ;
771 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
772 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
773 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
774 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
775 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
776 static int sdebug_uuid_ctl = DEF_UUID_CTL;
777 static bool sdebug_random = DEF_RANDOM;
778 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
779 static bool sdebug_removable = DEF_REMOVABLE;
780 static bool sdebug_clustering;
781 static bool sdebug_host_lock = DEF_HOST_LOCK;
782 static bool sdebug_strict = DEF_STRICT;
783 static bool sdebug_any_injecting_opt;
784 static bool sdebug_verbose;
785 static bool have_dif_prot;
786 static bool write_since_sync;
787 static bool sdebug_statistics = DEF_STATISTICS;
788 static bool sdebug_wp;
789 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
790 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
791 static char *sdeb_zbc_model_s;
793 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
794 SAM_LUN_AM_FLAT = 0x1,
795 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
796 SAM_LUN_AM_EXTENDED = 0x3};
797 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
798 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
800 static unsigned int sdebug_store_sectors;
801 static sector_t sdebug_capacity; /* in sectors */
803 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
804 may still need them */
805 static int sdebug_heads; /* heads per disk */
806 static int sdebug_cylinders_per; /* cylinders per surface */
807 static int sdebug_sectors_per; /* sectors per cylinder */
809 static LIST_HEAD(sdebug_host_list);
810 static DEFINE_SPINLOCK(sdebug_host_list_lock);
812 static struct xarray per_store_arr;
813 static struct xarray *per_store_ap = &per_store_arr;
814 static int sdeb_first_idx = -1; /* invalid index ==> none created */
815 static int sdeb_most_recent_idx = -1;
816 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
818 static unsigned long map_size;
819 static int num_aborts;
820 static int num_dev_resets;
821 static int num_target_resets;
822 static int num_bus_resets;
823 static int num_host_resets;
824 static int dix_writes;
825 static int dix_reads;
826 static int dif_errors;
828 /* ZBC global data */
829 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
830 static int sdeb_zbc_zone_size_mb;
831 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
832 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
834 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
835 static int poll_queues; /* io_uring iopoll interface */
836 static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
838 static DEFINE_RWLOCK(atomic_rw);
839 static DEFINE_RWLOCK(atomic_rw2);
841 static rwlock_t *ramdisk_lck_a[2];
843 static char sdebug_proc_name[] = MY_NAME;
844 static const char *my_name = MY_NAME;
846 static struct bus_type pseudo_lld_bus;
848 static struct device_driver sdebug_driverfs_driver = {
849 .name = sdebug_proc_name,
850 .bus = &pseudo_lld_bus,
853 static const int check_condition_result =
854 SAM_STAT_CHECK_CONDITION;
856 static const int illegal_condition_result =
857 (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
859 static const int device_qfull_result =
860 (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
862 static const int condition_met_result = SAM_STAT_CONDITION_MET;
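/*
 * Note: a SCSI result word packs the host byte in bits 23:16 and the SAM
 * status in the low byte, hence the (DID_xxx << 16) | SAM_STAT_xxx
 * compositions above.
 */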
865 /* Only do the extra work involved in logical block provisioning if one or
866 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
867 * real reads and writes (i.e. not skipping them for speed).
869 static inline bool scsi_debug_lbp(void)
871 return 0 == sdebug_fake_rw &&
872 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
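/*
 * Example (a sketch; parameter names are assumed to match the lbpu/lbpws/
 * lbpws10/fake_rw variables above):
 *   modprobe scsi_debug lbpu=1 lbpws=1 dev_size_mb=256
 * enables UNMAP and WRITE SAME(16) with the UNMAP bit; fake_rw must stay 0
 * or scsi_debug_lbp() returns false and provisioning work is skipped.
 */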
875 static void *lba2fake_store(struct sdeb_store_info *sip,
876 unsigned long long lba)
878 struct sdeb_store_info *lsip = sip;
880 lba = do_div(lba, sdebug_store_sectors);
881 if (!sip || !sip->storep) {
883 lsip = xa_load(per_store_ap, 0); /* should never be NULL */
885 return lsip->storep + lba * sdebug_sector_size;
888 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
891 sector = sector_div(sector, sdebug_store_sectors);
893 return sip->dif_storep + sector;
896 static void sdebug_max_tgts_luns(void)
898 struct sdebug_host_info *sdbg_host;
899 struct Scsi_Host *hpnt;
901 spin_lock(&sdebug_host_list_lock);
902 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
903 hpnt = sdbg_host->shost;
904 if ((hpnt->this_id >= 0) &&
905 (sdebug_num_tgts > hpnt->this_id))
906 hpnt->max_id = sdebug_num_tgts + 1;
908 hpnt->max_id = sdebug_num_tgts;
909 /* sdebug_max_luns; */
910 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
912 spin_unlock(&sdebug_host_list_lock);
915 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
917 /* Set in_bit to -1 to indicate no bit position of invalid field */
918 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
919 enum sdeb_cmd_data c_d,
920 int in_byte, int in_bit)
922 unsigned char *sbuff;
926 sbuff = scp->sense_buffer;
928 sdev_printk(KERN_ERR, scp->device,
929 "%s: sense_buffer is NULL\n", __func__);
932 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
933 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
934 scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
935 memset(sks, 0, sizeof(sks));
941 sks[0] |= 0x7 & in_bit;
943 put_unaligned_be16(in_byte, sks + 1);
949 memcpy(sbuff + sl + 4, sks, 3);
951 memcpy(sbuff + 15, sks, 3);
953 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
954 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
955 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
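/*
 * E.g. mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4) yields ILLEGAL REQUEST
 * with ASC 0x24 (INVALID FIELD IN CDB) and a SENSE KEY SPECIFIC field
 * pointing at byte 1, bit 4 of the cdb; SDEB_IN_DATA selects ASC 0x26
 * (INVALID FIELD IN PARAMETER LIST) instead.
 */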
958 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
960 if (!scp->sense_buffer) {
961 sdev_printk(KERN_ERR, scp->device,
962 "%s: sense_buffer is NULL\n", __func__);
965 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
967 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
970 sdev_printk(KERN_INFO, scp->device,
971 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
972 my_name, key, asc, asq);
975 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
977 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
980 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
983 if (sdebug_verbose) {
985 sdev_printk(KERN_INFO, dev,
986 "%s: BLKFLSBUF [0x1261]\n", __func__);
987 else if (0x5331 == cmd)
988 sdev_printk(KERN_INFO, dev,
989 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
992 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
996 /* return -ENOTTY; // correct return but upsets fdisk */
999 static void config_cdb_len(struct scsi_device *sdev)
1001 switch (sdebug_cdb_len) {
1002 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1003 sdev->use_10_for_rw = false;
1004 sdev->use_16_for_rw = false;
1005 sdev->use_10_for_ms = false;
1007 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1008 sdev->use_10_for_rw = true;
1009 sdev->use_16_for_rw = false;
1010 sdev->use_10_for_ms = false;
1012 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1013 sdev->use_10_for_rw = true;
1014 sdev->use_16_for_rw = false;
1015 sdev->use_10_for_ms = true;
1018 sdev->use_10_for_rw = false;
1019 sdev->use_16_for_rw = true;
1020 sdev->use_10_for_ms = true;
1022 case 32: /* No knobs to suggest this so same as 16 for now */
1023 sdev->use_10_for_rw = false;
1024 sdev->use_16_for_rw = true;
1025 sdev->use_10_for_ms = true;
1028 pr_warn("unexpected cdb_len=%d, force to 10\n",
1030 sdev->use_10_for_rw = true;
1031 sdev->use_16_for_rw = false;
1032 sdev->use_10_for_ms = false;
1033 sdebug_cdb_len = 10;
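/*
 * E.g. loading with cdb_len=16 nudges the mid-level toward READ(16)/WRITE(16)
 * and 10-byte MODE SENSE/SELECT via the use_* hints above; unrecognized
 * values fall back to 10-byte cdbs as a safe default.
 */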
1038 static void all_config_cdb_len(void)
1040 struct sdebug_host_info *sdbg_host;
1041 struct Scsi_Host *shost;
1042 struct scsi_device *sdev;
1044 spin_lock(&sdebug_host_list_lock);
1045 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1046 shost = sdbg_host->shost;
1047 shost_for_each_device(sdev, shost) {
1048 config_cdb_len(sdev);
1051 spin_unlock(&sdebug_host_list_lock);
1054 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1056 struct sdebug_host_info *sdhp;
1057 struct sdebug_dev_info *dp;
1059 spin_lock(&sdebug_host_list_lock);
1060 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1061 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1062 if ((devip->sdbg_host == dp->sdbg_host) &&
1063 (devip->target == dp->target))
1064 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1067 spin_unlock(&sdebug_host_list_lock);
1070 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1074 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1075 if (k != SDEBUG_NUM_UAS) {
1076 const char *cp = NULL;
1080 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1081 POWER_ON_RESET_ASCQ);
1083 cp = "power on reset";
1085 case SDEBUG_UA_BUS_RESET:
1086 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1091 case SDEBUG_UA_MODE_CHANGED:
1092 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1095 cp = "mode parameters changed";
1097 case SDEBUG_UA_CAPACITY_CHANGED:
1098 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1099 CAPACITY_CHANGED_ASCQ);
1101 cp = "capacity data changed";
1103 case SDEBUG_UA_MICROCODE_CHANGED:
1104 mk_sense_buffer(scp, UNIT_ATTENTION,
1106 MICROCODE_CHANGED_ASCQ);
1108 cp = "microcode has been changed";
1110 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1111 mk_sense_buffer(scp, UNIT_ATTENTION,
1113 MICROCODE_CHANGED_WO_RESET_ASCQ);
1115 cp = "microcode has been changed without reset";
1117 case SDEBUG_UA_LUNS_CHANGED:
1119 * SPC-3 behavior is to report a UNIT ATTENTION with
1120 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1121 * on the target, until a REPORT LUNS command is
1122 * received. SPC-4 behavior is to report it only once.
1123 * NOTE: sdebug_scsi_level does not use the same
1124 * values as struct scsi_device->scsi_level.
1126 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1127 clear_luns_changed_on_target(devip);
1128 mk_sense_buffer(scp, UNIT_ATTENTION,
1132 cp = "reported luns data has changed";
1135 pr_warn("unexpected unit attention code=%d\n", k);
1140 clear_bit(k, devip->uas_bm);
1142 sdev_printk(KERN_INFO, scp->device,
1143 "%s reports: Unit attention: %s\n",
1145 return check_condition_result;
1150 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1151 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1155 struct scsi_data_buffer *sdb = &scp->sdb;
1159 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1160 return DID_ERROR << 16;
1162 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1164 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1169 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1170 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1171 * calls, not required to write in ascending offset order. Assumes resid
1172 * set to scsi_bufflen() prior to any calls.
1174 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1175 int arr_len, unsigned int off_dst)
1177 unsigned int act_len, n;
1178 struct scsi_data_buffer *sdb = &scp->sdb;
1179 off_t skip = off_dst;
1181 if (sdb->length <= off_dst)
1183 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1184 return DID_ERROR << 16;
1186 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1187 arr, arr_len, skip);
1188 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1189 __func__, off_dst, scsi_bufflen(scp), act_len,
1190 scsi_get_resid(scp));
1191 n = scsi_bufflen(scp) - (off_dst + act_len);
1192 scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1196 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1197 * 'arr' or -1 if error.
1199 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1202 if (!scsi_bufflen(scp))
1204 if (scp->sc_data_direction != DMA_TO_DEVICE)
1207 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1211 static char sdebug_inq_vendor_id[9] = "Linux ";
1212 static char sdebug_inq_product_id[17] = "scsi_debug ";
1213 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1214 /* Use some locally assigned NAAs for SAS addresses. */
1215 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1216 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1217 static const u64 naa3_comp_c = 0x3111111000000000ULL;
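/*
 * NAA format 3 is "locally assigned": the top nibble 0x3 selects the format
 * and the remaining 60 bits are locally administered, so adding a small
 * port or device number to one of the constants above produces identifiers
 * that are unique enough for this simulator.
 */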
1219 /* Device identification VPD page. Returns number of bytes placed in arr */
1220 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1221 int target_dev_id, int dev_id_num,
1222 const char *dev_id_str, int dev_id_str_len,
1223 const uuid_t *lu_name)
1228 port_a = target_dev_id + 1;
1229 /* T10 vendor identifier field format (faked) */
1230 arr[0] = 0x2; /* ASCII */
1233 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1234 memcpy(&arr[12], sdebug_inq_product_id, 16);
1235 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1236 num = 8 + 16 + dev_id_str_len;
1239 if (dev_id_num >= 0) {
1240 if (sdebug_uuid_ctl) {
1241 /* Locally assigned UUID */
1242 arr[num++] = 0x1; /* binary (not necessarily sas) */
1243 arr[num++] = 0xa; /* PIV=0, lu, naa */
1246 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1248 memcpy(arr + num, lu_name, 16);
1251 /* NAA-3, Logical unit identifier (binary) */
1252 arr[num++] = 0x1; /* binary (not necessarily sas) */
1253 arr[num++] = 0x3; /* PIV=0, lu, naa */
1256 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1259 /* Target relative port number */
1260 arr[num++] = 0x61; /* proto=sas, binary */
1261 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1262 arr[num++] = 0x0; /* reserved */
1263 arr[num++] = 0x4; /* length */
1264 arr[num++] = 0x0; /* reserved */
1265 arr[num++] = 0x0; /* reserved */
1267 arr[num++] = 0x1; /* relative port A */
1269 /* NAA-3, Target port identifier */
1270 arr[num++] = 0x61; /* proto=sas, binary */
1271 arr[num++] = 0x93; /* piv=1, target port, naa */
1274 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1276 /* NAA-3, Target port group identifier */
1277 arr[num++] = 0x61; /* proto=sas, binary */
1278 arr[num++] = 0x95; /* piv=1, target port group id */
1283 put_unaligned_be16(port_group_id, arr + num);
1285 /* NAA-3, Target device identifier */
1286 arr[num++] = 0x61; /* proto=sas, binary */
1287 arr[num++] = 0xa3; /* piv=1, target device, naa */
1290 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1292 /* SCSI name string: Target device identifier */
1293 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1294 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1297 memcpy(arr + num, "naa.32222220", 12);
1299 snprintf(b, sizeof(b), "%08X", target_dev_id);
1300 memcpy(arr + num, b, 8);
1302 memset(arr + num, 0, 4);
1307 static unsigned char vpd84_data[] = {
1308 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1309 0x22,0x22,0x22,0x0,0xbb,0x1,
1310 0x22,0x22,0x22,0x0,0xbb,0x2,
1313 /* Software interface identification VPD page */
1314 static int inquiry_vpd_84(unsigned char *arr)
1316 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1317 return sizeof(vpd84_data);
1320 /* Management network addresses VPD page */
1321 static int inquiry_vpd_85(unsigned char *arr)
1324 const char *na1 = "https://www.kernel.org/config";
1325 const char *na2 = "http://www.kernel.org/log";
1328 arr[num++] = 0x1; /* lu, storage config */
1329 arr[num++] = 0x0; /* reserved */
1334 plen = ((plen / 4) + 1) * 4;
1335 arr[num++] = plen; /* length, null terminated, padded */
1336 memcpy(arr + num, na1, olen);
1337 memset(arr + num + olen, 0, plen - olen);
1340 arr[num++] = 0x4; /* lu, logging */
1341 arr[num++] = 0x0; /* reserved */
1346 plen = ((plen / 4) + 1) * 4;
1347 arr[num++] = plen; /* length, null terminated, padded */
1348 memcpy(arr + num, na2, olen);
1349 memset(arr + num + olen, 0, plen - olen);
1355 /* SCSI ports VPD page */
1356 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1361 port_a = target_dev_id + 1;
1362 port_b = port_a + 1;
1363 arr[num++] = 0x0; /* reserved */
1364 arr[num++] = 0x0; /* reserved */
1366 arr[num++] = 0x1; /* relative port 1 (primary) */
1367 memset(arr + num, 0, 6);
1370 arr[num++] = 12; /* length tp descriptor */
1371 /* naa-5 target port identifier (A) */
1372 arr[num++] = 0x61; /* proto=sas, binary */
1373 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1374 arr[num++] = 0x0; /* reserved */
1375 arr[num++] = 0x8; /* length */
1376 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1378 arr[num++] = 0x0; /* reserved */
1379 arr[num++] = 0x0; /* reserved */
1381 arr[num++] = 0x2; /* relative port 2 (secondary) */
1382 memset(arr + num, 0, 6);
1385 arr[num++] = 12; /* length tp descriptor */
1386 /* naa-5 target port identifier (B) */
1387 arr[num++] = 0x61; /* proto=sas, binary */
1388 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1389 arr[num++] = 0x0; /* reserved */
1390 arr[num++] = 0x8; /* length */
1391 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1398 static unsigned char vpd89_data[] = {
1399 /* from 4th byte */ 0,0,0,0,
1400 'l','i','n','u','x',' ',' ',' ',
1401 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1403 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1405 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1406 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1407 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1408 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1410 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1412 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1414 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1415 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1416 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1417 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1418 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1419 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1420 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1422 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1423 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1424 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1425 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1426 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1427 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1428 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1442 /* ATA Information VPD page */
1443 static int inquiry_vpd_89(unsigned char *arr)
1445 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1446 return sizeof(vpd89_data);
1450 static unsigned char vpdb0_data[] = {
1451 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1457 /* Block limits VPD page (SBC-3) */
1458 static int inquiry_vpd_b0(unsigned char *arr)
1462 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1464 /* Optimal transfer length granularity */
1465 if (sdebug_opt_xferlen_exp != 0 &&
1466 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1467 gran = 1 << sdebug_opt_xferlen_exp;
1469 gran = 1 << sdebug_physblk_exp;
1470 put_unaligned_be16(gran, arr + 2);
1472 /* Maximum Transfer Length */
1473 if (sdebug_store_sectors > 0x400)
1474 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1476 /* Optimal Transfer Length */
1477 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1480 /* Maximum Unmap LBA Count */
1481 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1483 /* Maximum Unmap Block Descriptor Count */
1484 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1487 /* Unmap Granularity Alignment */
1488 if (sdebug_unmap_alignment) {
1489 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1490 arr[28] |= 0x80; /* UGAVALID */
1493 /* Optimal Unmap Granularity */
1494 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1496 /* Maximum WRITE SAME Length */
1497 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1499 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1501 return sizeof(vpdb0_data);
1504 /* Block device characteristics VPD page (SBC-3) */
1505 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1507 memset(arr, 0, 0x3c);
1509 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1511 arr[3] = 5; /* less than 1.8" */
1512 if (devip->zmodel == BLK_ZONED_HA)
1513 arr[4] = 1 << 4; /* zoned field = 01b */
1518 /* Logical block provisioning VPD page (SBC-4) */
1519 static int inquiry_vpd_b2(unsigned char *arr)
1521 memset(arr, 0, 0x4);
1522 arr[0] = 0; /* threshold exponent */
1529 if (sdebug_lbprz && scsi_debug_lbp())
1530 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1531 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1532 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1533 /* threshold_percentage=0 */
1537 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1538 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1540 memset(arr, 0, 0x3c);
1541 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1543 * Set Optimal number of open sequential write preferred zones and
1544 * Optimal number of non-sequentially written sequential write
1545 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1546 * fields set to zero, apart from Max. number of open swrz_s field.
1548 put_unaligned_be32(0xffffffff, &arr[4]);
1549 put_unaligned_be32(0xffffffff, &arr[8]);
1550 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1551 put_unaligned_be32(devip->max_open, &arr[12]);
1553 put_unaligned_be32(0xffffffff, &arr[12]);
1557 #define SDEBUG_LONG_INQ_SZ 96
1558 #define SDEBUG_MAX_INQ_ARR_SZ 584
1560 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1562 unsigned char pq_pdt;
1564 unsigned char *cmd = scp->cmnd;
1567 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1569 alloc_len = get_unaligned_be16(cmd + 3);
1570 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1572 return DID_REQUEUE << 16;
1573 is_disk = (sdebug_ptype == TYPE_DISK);
1574 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1575 is_disk_zbc = (is_disk || is_zbc);
1576 have_wlun = scsi_is_wlun(scp->device->lun);
1578 pq_pdt = TYPE_WLUN; /* present, wlun */
1579 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1580 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1582 pq_pdt = (sdebug_ptype & 0x1f);
1584 if (0x2 & cmd[1]) { /* CMDDT bit set */
1585 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1587 return check_condition_result;
1588 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1589 int lu_id_num, port_group_id, target_dev_id;
1592 int host_no = devip->sdbg_host->shost->host_no;
1594 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1595 (devip->channel & 0x7f);
1596 if (sdebug_vpd_use_hostno == 0)
1598 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1599 (devip->target * 1000) + devip->lun);
1600 target_dev_id = ((host_no + 1) * 2000) +
1601 (devip->target * 1000) - 3;
1602 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1603 if (0 == cmd[2]) { /* supported vital product data pages */
1604 arr[1] = cmd[2]; /*sanity */
1606 arr[n++] = 0x0; /* this page */
1607 arr[n++] = 0x80; /* unit serial number */
1608 arr[n++] = 0x83; /* device identification */
1609 arr[n++] = 0x84; /* software interface ident. */
1610 arr[n++] = 0x85; /* management network addresses */
1611 arr[n++] = 0x86; /* extended inquiry */
1612 arr[n++] = 0x87; /* mode page policy */
1613 arr[n++] = 0x88; /* SCSI ports */
1614 if (is_disk_zbc) { /* SBC or ZBC */
1615 arr[n++] = 0x89; /* ATA information */
1616 arr[n++] = 0xb0; /* Block limits */
1617 arr[n++] = 0xb1; /* Block characteristics */
1619 arr[n++] = 0xb2; /* LB Provisioning */
1621 arr[n++] = 0xb6; /* ZB dev. char. */
1623 arr[3] = n - 4; /* number of supported VPD pages */
1624 } else if (0x80 == cmd[2]) { /* unit serial number */
1625 arr[1] = cmd[2]; /*sanity */
1627 memcpy(&arr[4], lu_id_str, len);
1628 } else if (0x83 == cmd[2]) { /* device identification */
1629 arr[1] = cmd[2]; /*sanity */
1630 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1631 target_dev_id, lu_id_num,
1634 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1635 arr[1] = cmd[2]; /*sanity */
1636 arr[3] = inquiry_vpd_84(&arr[4]);
1637 } else if (0x85 == cmd[2]) { /* Management network addresses */
1638 arr[1] = cmd[2]; /*sanity */
1639 arr[3] = inquiry_vpd_85(&arr[4]);
1640 } else if (0x86 == cmd[2]) { /* extended inquiry */
1641 arr[1] = cmd[2]; /*sanity */
1642 arr[3] = 0x3c; /* number of following entries */
1643 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1644 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1645 else if (have_dif_prot)
1646 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1648 arr[4] = 0x0; /* no protection stuff */
1649 arr[5] = 0x7; /* head of q, ordered + simple q's */
1650 } else if (0x87 == cmd[2]) { /* mode page policy */
1651 arr[1] = cmd[2]; /*sanity */
1652 arr[3] = 0x8; /* number of following entries */
1653 arr[4] = 0x2; /* disconnect-reconnect mp */
1654 arr[6] = 0x80; /* mlus, shared */
1655 arr[8] = 0x18; /* protocol specific lu */
1656 arr[10] = 0x82; /* mlus, per initiator port */
1657 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1658 arr[1] = cmd[2]; /*sanity */
1659 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1660 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1661 arr[1] = cmd[2]; /*sanity */
1662 n = inquiry_vpd_89(&arr[4]);
1663 put_unaligned_be16(n, arr + 2);
1664 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1665 arr[1] = cmd[2]; /*sanity */
1666 arr[3] = inquiry_vpd_b0(&arr[4]);
1667 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1668 arr[1] = cmd[2]; /*sanity */
1669 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1670 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1671 arr[1] = cmd[2]; /*sanity */
1672 arr[3] = inquiry_vpd_b2(&arr[4]);
1673 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1674 arr[1] = cmd[2]; /*sanity */
1675 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1677 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1679 return check_condition_result;
1681 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1682 ret = fill_from_dev_buffer(scp, arr,
1683 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1687 /* drops through here for a standard inquiry */
1688 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1689 arr[2] = sdebug_scsi_level;
1690 arr[3] = 2; /* response_data_format==2 */
1691 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1692 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1693 if (sdebug_vpd_use_hostno == 0)
1694 arr[5] |= 0x10; /* claim: implicit TPGS */
1695 arr[6] = 0x10; /* claim: MultiP */
1696 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1697 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1698 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1699 memcpy(&arr[16], sdebug_inq_product_id, 16);
1700 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1701 /* Use Vendor Specific area to place the driver date in ASCII */
1702 memcpy(&arr[36], sdebug_version_date, 8);
1703 /* version descriptors (2 bytes each) follow */
1704 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1705 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1707 if (is_disk) { /* SBC-4 no version claimed */
1708 put_unaligned_be16(0x600, arr + n);
1710 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1711 put_unaligned_be16(0x525, arr + n);
1713 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1714 put_unaligned_be16(0x624, arr + n);
1717 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1718 ret = fill_from_dev_buffer(scp, arr,
1719 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1724 /* See resp_iec_m_pg() for how this data is manipulated */
1725 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1728 static int resp_requests(struct scsi_cmnd *scp,
1729 struct sdebug_dev_info *devip)
1731 unsigned char *cmd = scp->cmnd;
1732 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1733 bool dsense = !!(cmd[1] & 1);
1734 u32 alloc_len = cmd[4];
1736 int stopped_state = atomic_read(&devip->stopped);
1738 memset(arr, 0, sizeof(arr));
1739 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1743 arr[2] = LOGICAL_UNIT_NOT_READY;
1744 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1748 arr[2] = NOT_READY; /* NOT_READY in sense_key (fixed format) */
1749 arr[7] = 0xa; /* 18 byte sense buffer */
1750 arr[12] = LOGICAL_UNIT_NOT_READY;
1751 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1753 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1754 /* Informational exceptions control mode page: TEST=1, MRIE=6 */
1757 arr[1] = 0x0; /* NO_SENSE in sense_key */
1758 arr[2] = THRESHOLD_EXCEEDED;
1759 arr[3] = 0xff; /* Failure prediction(false) */
1763 arr[2] = 0x0; /* NO_SENSE in sense_key */
1764 arr[7] = 0xa; /* 18 byte sense buffer */
1765 arr[12] = THRESHOLD_EXCEEDED;
1766 arr[13] = 0xff; /* Failure prediction(false) */
1768 } else { /* nothing to report */
1771 memset(arr, 0, len);
1774 memset(arr, 0, len);
1779 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1782 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1784 unsigned char *cmd = scp->cmnd;
1785 int power_cond, want_stop, stopped_state;
1788 power_cond = (cmd[4] & 0xf0) >> 4;
1790 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1791 return check_condition_result;
1793 want_stop = !(cmd[4] & 1);
1794 stopped_state = atomic_read(&devip->stopped);
1795 if (stopped_state == 2) {
1796 ktime_t now_ts = ktime_get_boottime();
1798 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1799 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1801 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1802 /* tur_ms_to_ready timer has expired */
1803 atomic_set(&devip->stopped, 0);
1807 if (stopped_state == 2) {
1809 stopped_state = 1; /* dummy up success */
1810 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1811 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1812 return check_condition_result;
1816 changing = (stopped_state != want_stop);
1818 atomic_xchg(&devip->stopped, want_stop);
1819 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1820 return SDEG_RES_IMMED_MASK;
1825 static sector_t get_sdebug_capacity(void)
1827 static const unsigned int gibibyte = 1073741824;
1829 if (sdebug_virtual_gb > 0)
1830 return (sector_t)sdebug_virtual_gb *
1831 (gibibyte / sdebug_sector_size);
1833 return sdebug_store_sectors;
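/*
 * Illustrative arithmetic for the helper above (assuming the documented
 * module parameters): with virtual_gb=4 and sector_size=512 the reported
 * capacity is 4 * (1073741824 / 512) = 8388608 sectors (4 GiB), even if
 * the backing store is smaller; accesses wrap modulo sdebug_store_sectors
 * in do_device_access().
 */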
1836 #define SDEBUG_READCAP_ARR_SZ 8
1837 static int resp_readcap(struct scsi_cmnd *scp,
1838 struct sdebug_dev_info *devip)
1840 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1843 /* following just in case virtual_gb changed */
1844 sdebug_capacity = get_sdebug_capacity();
1845 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1846 if (sdebug_capacity < 0xffffffff) {
1847 capac = (unsigned int)sdebug_capacity - 1;
1848 put_unaligned_be32(capac, arr + 0);
1850 put_unaligned_be32(0xffffffff, arr + 0);
1851 put_unaligned_be16(sdebug_sector_size, arr + 6);
1852 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1855 #define SDEBUG_READCAP16_ARR_SZ 32
1856 static int resp_readcap16(struct scsi_cmnd *scp,
1857 struct sdebug_dev_info *devip)
1859 unsigned char *cmd = scp->cmnd;
1860 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1863 alloc_len = get_unaligned_be32(cmd + 10);
1864 /* following just in case virtual_gb changed */
1865 sdebug_capacity = get_sdebug_capacity();
1866 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1867 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1868 put_unaligned_be32(sdebug_sector_size, arr + 8);
1869 arr[13] = sdebug_physblk_exp & 0xf;
1870 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1872 if (scsi_debug_lbp()) {
1873 arr[14] |= 0x80; /* LBPME */
1874 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1875 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1876 * in the wider field maps to 0 in this field. */
1878 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1882 arr[15] = sdebug_lowest_aligned & 0xff;
1884 if (have_dif_prot) {
1885 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1886 arr[12] |= 1; /* PROT_EN */
1889 return fill_from_dev_buffer(scp, arr,
1890 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
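/*
 * Example, assuming the driver's default 8 MiB store and sector_size=512:
 * there are 16384 logical blocks, so READ CAPACITY(16) returns a RETURNED
 * LOGICAL BLOCK ADDRESS of 16383 (the last LBA) and a LOGICAL BLOCK
 * LENGTH IN BYTES of 512, plus the protection and provisioning bits set
 * up above.
 */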
1893 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1895 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1896 struct sdebug_dev_info *devip)
1898 unsigned char *cmd = scp->cmnd;
1900 int host_no = devip->sdbg_host->shost->host_no;
1901 int port_group_a, port_group_b, port_a, port_b;
1905 alen = get_unaligned_be32(cmd + 6);
1906 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
if (!arr)
1908 return DID_REQUEUE << 16;
1910 /* EVPD page 0x88 states we have two ports, one
1911 * real and a fake port with no device connected.
1912 * So we create two port groups with one port each
1913 * and set the group with port B to unavailable. */
1915 port_a = 0x1; /* relative port A */
1916 port_b = 0x2; /* relative port B */
1917 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1918 (devip->channel & 0x7f);
1919 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1920 (devip->channel & 0x7f) + 0x80;
1923 /* The asymmetric access state is cycled according to the host_id. */
1926 if (sdebug_vpd_use_hostno == 0) {
1927 arr[n++] = host_no % 3; /* Asymm access state */
1928 arr[n++] = 0x0F; /* claim: all states are supported */
1930 arr[n++] = 0x0; /* Active/Optimized path */
1931 arr[n++] = 0x01; /* only support active/optimized paths */
1933 put_unaligned_be16(port_group_a, arr + n);
1935 arr[n++] = 0; /* Reserved */
1936 arr[n++] = 0; /* Status code */
1937 arr[n++] = 0; /* Vendor unique */
1938 arr[n++] = 0x1; /* One port per group */
1939 arr[n++] = 0; /* Reserved */
1940 arr[n++] = 0; /* Reserved */
1941 put_unaligned_be16(port_a, arr + n);
1943 arr[n++] = 3; /* Port unavailable */
1944 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1945 put_unaligned_be16(port_group_b, arr + n);
1947 arr[n++] = 0; /* Reserved */
1948 arr[n++] = 0; /* Status code */
1949 arr[n++] = 0; /* Vendor unique */
1950 arr[n++] = 0x1; /* One port per group */
1951 arr[n++] = 0; /* Reserved */
1952 arr[n++] = 0; /* Reserved */
1953 put_unaligned_be16(port_b, arr + n);
1957 put_unaligned_be32(rlen, arr + 0);
1960 /* Return the smallest value of either
1961 * - The allocated length
1962 * - The constructed command length
1963 * - The maximum array size */
1965 rlen = min(alen, n);
1966 ret = fill_from_dev_buffer(scp, arr,
1967 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
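/*
 * Worked example of the layout built above: for host_no=0 and channel=0,
 * target port group 0x0100 holds relative port 1 (active/optimized,
 * unless vpd_use_hostno=0 makes the state cycle with the host number)
 * and target port group 0x0180 holds relative port 2 in the unavailable
 * state, matching the two-port claim made by VPD page 0x88.
 */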
1972 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1973 struct sdebug_dev_info *devip)
1976 u8 reporting_opts, req_opcode, sdeb_i, supp;
1978 u32 alloc_len, a_len;
1979 int k, offset, len, errsts, count, bump, na;
1980 const struct opcode_info_t *oip;
1981 const struct opcode_info_t *r_oip;
1983 u8 *cmd = scp->cmnd;
1985 rctd = !!(cmd[2] & 0x80);
1986 reporting_opts = cmd[2] & 0x7;
1987 req_opcode = cmd[3];
1988 req_sa = get_unaligned_be16(cmd + 4);
1989 alloc_len = get_unaligned_be32(cmd + 6);
1990 if (alloc_len < 4 || alloc_len > 0xffff) {
1991 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1992 return check_condition_result;
1994 if (alloc_len > 8192)
1998 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2000 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2002 return check_condition_result;
2004 switch (reporting_opts) {
2005 case 0: /* all commands */
2006 /* count number of commands */
2007 for (count = 0, oip = opcode_info_arr;
2008 oip->num_attached != 0xff; ++oip) {
2009 if (F_INV_OP & oip->flags)
2011 count += (oip->num_attached + 1);
2013 bump = rctd ? 20 : 8;
2014 put_unaligned_be32(count * bump, arr);
2015 for (offset = 4, oip = opcode_info_arr;
2016 oip->num_attached != 0xff && offset < a_len; ++oip) {
2017 if (F_INV_OP & oip->flags)
2019 na = oip->num_attached;
2020 arr[offset] = oip->opcode;
2021 put_unaligned_be16(oip->sa, arr + offset + 2);
2023 arr[offset + 5] |= 0x2;
2024 if (FF_SA & oip->flags)
2025 arr[offset + 5] |= 0x1;
2026 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2028 put_unaligned_be16(0xa, arr + offset + 8);
2030 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2031 if (F_INV_OP & oip->flags)
2034 arr[offset] = oip->opcode;
2035 put_unaligned_be16(oip->sa, arr + offset + 2);
2037 arr[offset + 5] |= 0x2;
2038 if (FF_SA & oip->flags)
2039 arr[offset + 5] |= 0x1;
2040 put_unaligned_be16(oip->len_mask[0],
2043 put_unaligned_be16(0xa,
2050 case 1: /* one command: opcode only */
2051 case 2: /* one command: opcode plus service action */
2052 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2053 sdeb_i = opcode_ind_arr[req_opcode];
2054 oip = &opcode_info_arr[sdeb_i];
2055 if (F_INV_OP & oip->flags) {
2059 if (1 == reporting_opts) {
2060 if (FF_SA & oip->flags) {
2061 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2064 return check_condition_result;
2067 } else if (2 == reporting_opts &&
2068 0 == (FF_SA & oip->flags)) {
2069 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2070 kfree(arr); /* point at requested sa */
2071 return check_condition_result;
2073 if (0 == (FF_SA & oip->flags) &&
2074 req_opcode == oip->opcode)
2076 else if (0 == (FF_SA & oip->flags)) {
2077 na = oip->num_attached;
2078 for (k = 0, oip = oip->arrp; k < na;
2080 if (req_opcode == oip->opcode)
2083 supp = (k >= na) ? 1 : 3;
2084 } else if (req_sa != oip->sa) {
2085 na = oip->num_attached;
2086 for (k = 0, oip = oip->arrp; k < na;
2088 if (req_sa == oip->sa)
2091 supp = (k >= na) ? 1 : 3;
2095 u = oip->len_mask[0];
2096 put_unaligned_be16(u, arr + 2);
2097 arr[4] = oip->opcode;
2098 for (k = 1; k < u; ++k)
2099 arr[4 + k] = (k < 16) ?
2100 oip->len_mask[k] : 0xff;
2105 arr[1] = (rctd ? 0x80 : 0) | supp;
2107 put_unaligned_be16(0xa, arr + offset);
2112 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2114 return check_condition_result;
2116 offset = (offset < a_len) ? offset : a_len;
2117 len = (offset < alloc_len) ? offset : alloc_len;
2118 errsts = fill_from_dev_buffer(scp, arr, len);
2123 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2124 struct sdebug_dev_info *devip)
2129 u8 *cmd = scp->cmnd;
2131 memset(arr, 0, sizeof(arr));
2132 repd = !!(cmd[2] & 0x80);
2133 alloc_len = get_unaligned_be32(cmd + 6);
2134 if (alloc_len < 4) {
2135 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2136 return check_condition_result;
2138 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2139 arr[1] = 0x1; /* ITNRS */
2146 len = (len < alloc_len) ? len : alloc_len;
2147 return fill_from_dev_buffer(scp, arr, len);
2150 /* <<Following mode page info copied from ST318451LW>> */
2152 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2153 { /* Read-Write Error Recovery page for mode_sense */
2154 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2157 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2159 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2160 return sizeof(err_recov_pg);
2163 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2164 { /* Disconnect-Reconnect page for mode_sense */
2165 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2166 0, 0, 0, 0, 0, 0, 0, 0};
2168 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2170 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2171 return sizeof(disconnect_pg);
2174 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2175 { /* Format device page for mode_sense */
2176 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2177 0, 0, 0, 0, 0, 0, 0, 0,
2178 0, 0, 0, 0, 0x40, 0, 0, 0};
2180 memcpy(p, format_pg, sizeof(format_pg));
2181 put_unaligned_be16(sdebug_sectors_per, p + 10);
2182 put_unaligned_be16(sdebug_sector_size, p + 12);
2183 if (sdebug_removable)
2184 p[20] |= 0x20; /* should agree with INQUIRY */
2186 memset(p + 2, 0, sizeof(format_pg) - 2);
2187 return sizeof(format_pg);
2190 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2191 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2194 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2195 { /* Caching page for mode_sense */
2196 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2197 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2198 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2199 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2201 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2202 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2203 memcpy(p, caching_pg, sizeof(caching_pg));
2205 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2206 else if (2 == pcontrol)
2207 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2208 return sizeof(caching_pg);
2211 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2214 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2215 { /* Control mode page for mode_sense */
2216 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2218 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2222 ctrl_m_pg[2] |= 0x4;
2224 ctrl_m_pg[2] &= ~0x4;
2227 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2229 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2231 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2232 else if (2 == pcontrol)
2233 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2234 return sizeof(ctrl_m_pg);
2238 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2239 { /* Informational Exceptions control mode page for mode_sense */
2240 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2242 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2245 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2247 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2248 else if (2 == pcontrol)
2249 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2250 return sizeof(iec_m_pg);
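/*
 * Note on the pcontrol convention shared by the resp_*_pg() helpers
 * above: 0 returns current values, 1 returns the changeable-bits mask
 * (for this page ch_iec_m_pg marks the TEST bit and the MRIE field as
 * settable), 2 returns the default values, and 3 (saved values) is
 * rejected earlier in resp_mode_sense() with SAVING_PARAMS_UNSUP.
 */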
2253 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2254 { /* SAS SSP mode page - short format for mode_sense */
2255 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2256 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2258 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2260 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2261 return sizeof(sas_sf_m_pg);
2265 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2267 { /* SAS phy control and discover mode page for mode_sense */
2268 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2269 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2270 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2271 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2272 0x2, 0, 0, 0, 0, 0, 0, 0,
2273 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2274 0, 0, 0, 0, 0, 0, 0, 0,
2275 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2276 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2277 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2278 0x3, 0, 0, 0, 0, 0, 0, 0,
2279 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2280 0, 0, 0, 0, 0, 0, 0, 0,
2284 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2285 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2286 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2287 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2288 port_a = target_dev_id + 1;
2289 port_b = port_a + 1;
2290 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2291 put_unaligned_be32(port_a, p + 20);
2292 put_unaligned_be32(port_b, p + 48 + 20);
2294 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2295 return sizeof(sas_pcd_m_pg);
2298 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2299 { /* SAS SSP shared protocol specific port mode subpage */
2300 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2301 0, 0, 0, 0, 0, 0, 0, 0,
2304 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2306 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2307 return sizeof(sas_sha_m_pg);
2310 #define SDEBUG_MAX_MSENSE_SZ 256
2312 static int resp_mode_sense(struct scsi_cmnd *scp,
2313 struct sdebug_dev_info *devip)
2315 int pcontrol, pcode, subpcode, bd_len;
2316 unsigned char dev_spec;
2317 u32 alloc_len, offset, len;
2319 int target = scp->device->id;
2321 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2322 unsigned char *cmd = scp->cmnd;
2323 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2325 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2326 pcontrol = (cmd[2] & 0xc0) >> 6;
2327 pcode = cmd[2] & 0x3f;
2329 msense_6 = (MODE_SENSE == cmd[0]);
2330 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2331 is_disk = (sdebug_ptype == TYPE_DISK);
2332 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2333 if ((is_disk || is_zbc) && !dbd)
2334 bd_len = llbaa ? 16 : 8;
2337 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2338 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2339 if (0x3 == pcontrol) { /* Saving values not supported */
2340 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2341 return check_condition_result;
2343 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2344 (devip->target * 1000) - 3;
2345 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2346 if (is_disk || is_zbc) {
2347 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2359 arr[4] = 0x1; /* set LONGLBA bit */
2360 arr[7] = bd_len; /* assume 255 or less */
2364 if ((bd_len > 0) && (!sdebug_capacity))
2365 sdebug_capacity = get_sdebug_capacity();
2368 if (sdebug_capacity > 0xfffffffe)
2369 put_unaligned_be32(0xffffffff, ap + 0);
2371 put_unaligned_be32(sdebug_capacity, ap + 0);
2372 put_unaligned_be16(sdebug_sector_size, ap + 6);
2375 } else if (16 == bd_len) {
2376 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2377 put_unaligned_be32(sdebug_sector_size, ap + 12);
2382 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2383 /* TODO: Control Extension page */
2384 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2385 return check_condition_result;
2390 case 0x1: /* Read-Write error recovery page, direct access */
2391 len = resp_err_recov_pg(ap, pcontrol, target);
2394 case 0x2: /* Disconnect-Reconnect page, all devices */
2395 len = resp_disconnect_pg(ap, pcontrol, target);
2398 case 0x3: /* Format device page, direct access */
2400 len = resp_format_pg(ap, pcontrol, target);
2405 case 0x8: /* Caching page, direct access */
2406 if (is_disk || is_zbc) {
2407 len = resp_caching_pg(ap, pcontrol, target);
2412 case 0xa: /* Control Mode page, all devices */
2413 len = resp_ctrl_m_pg(ap, pcontrol, target);
2416 case 0x19: /* if spc==1 then sas phy, control+discover */
2417 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2418 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2419 return check_condition_result;
2422 if ((0x0 == subpcode) || (0xff == subpcode))
2423 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2424 if ((0x1 == subpcode) || (0xff == subpcode))
2425 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2427 if ((0x2 == subpcode) || (0xff == subpcode))
2428 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2431 case 0x1c: /* Informational Exceptions Mode page, all devices */
2432 len = resp_iec_m_pg(ap, pcontrol, target);
2435 case 0x3f: /* Read all Mode pages */
2436 if ((0 == subpcode) || (0xff == subpcode)) {
2437 len = resp_err_recov_pg(ap, pcontrol, target);
2438 len += resp_disconnect_pg(ap + len, pcontrol, target);
2440 len += resp_format_pg(ap + len, pcontrol,
2442 len += resp_caching_pg(ap + len, pcontrol,
2444 } else if (is_zbc) {
2445 len += resp_caching_pg(ap + len, pcontrol,
2448 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2449 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2450 if (0xff == subpcode) {
2451 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2452 target, target_dev_id);
2453 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2455 len += resp_iec_m_pg(ap + len, pcontrol, target);
2458 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2459 return check_condition_result;
2467 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2468 return check_condition_result;
2471 arr[0] = offset - 1;
2473 put_unaligned_be16((offset - 2), arr + 0);
2474 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
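/*
 * Example of the header fix-up just above: for a 36 byte reply a MODE
 * SENSE(6) response carries 35 (offset - 1) in its one byte MODE DATA
 * LENGTH at arr[0], while MODE SENSE(10) carries 34 (offset - 2) in the
 * two byte big-endian field at arr[0..1]; the length field never counts
 * itself.
 */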
2477 #define SDEBUG_MAX_MSELECT_SZ 512
2479 static int resp_mode_select(struct scsi_cmnd *scp,
2480 struct sdebug_dev_info *devip)
2482 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2483 int param_len, res, mpage;
2484 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2485 unsigned char *cmd = scp->cmnd;
2486 int mselect6 = (MODE_SELECT == cmd[0]);
2488 memset(arr, 0, sizeof(arr));
2491 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2492 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2493 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2494 return check_condition_result;
2496 res = fetch_to_dev_buffer(scp, arr, param_len);
2498 return DID_ERROR << 16;
2499 else if (sdebug_verbose && (res < param_len))
2500 sdev_printk(KERN_INFO, scp->device,
2501 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2502 __func__, param_len, res);
2503 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2504 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2505 off = bd_len + (mselect6 ? 4 : 8);
2506 if (md_len > 2 || off >= res) {
2507 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2508 return check_condition_result;
2510 mpage = arr[off] & 0x3f;
2511 ps = !!(arr[off] & 0x80);
2513 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2514 return check_condition_result;
2516 spf = !!(arr[off] & 0x40);
2517 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2519 if ((pg_len + off) > param_len) {
2520 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2521 PARAMETER_LIST_LENGTH_ERR, 0);
2522 return check_condition_result;
2525 case 0x8: /* Caching Mode page */
2526 if (caching_pg[1] == arr[off + 1]) {
2527 memcpy(caching_pg + 2, arr + off + 2,
2528 sizeof(caching_pg) - 2);
2529 goto set_mode_changed_ua;
2532 case 0xa: /* Control Mode page */
2533 if (ctrl_m_pg[1] == arr[off + 1]) {
2534 memcpy(ctrl_m_pg + 2, arr + off + 2,
2535 sizeof(ctrl_m_pg) - 2);
2536 if (ctrl_m_pg[4] & 0x8)
2540 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2541 goto set_mode_changed_ua;
2544 case 0x1c: /* Informational Exceptions Mode page */
2545 if (iec_m_pg[1] == arr[off + 1]) {
2546 memcpy(iec_m_pg + 2, arr + off + 2,
2547 sizeof(iec_m_pg) - 2);
2548 goto set_mode_changed_ua;
2554 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2555 return check_condition_result;
2556 set_mode_changed_ua:
2557 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2561 static int resp_temp_l_pg(unsigned char *arr)
2563 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2564 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2567 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2568 return sizeof(temp_l_pg);
2571 static int resp_ie_l_pg(unsigned char *arr)
2573 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2576 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2577 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2578 arr[4] = THRESHOLD_EXCEEDED;
2581 return sizeof(ie_l_pg);
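/*
 * For reference, the canned data above reports a current temperature of
 * 38 C and a reference temperature of 65 C; when the TEST bit of the
 * Informational Exceptions mode page is set, resp_ie_l_pg() overwrites
 * the ASC with THRESHOLD_EXCEEDED so a predicted-failure condition can
 * be simulated without real hardware.
 */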
2584 #define SDEBUG_MAX_LSENSE_SZ 512
2586 static int resp_log_sense(struct scsi_cmnd *scp,
2587 struct sdebug_dev_info *devip)
2589 int ppc, sp, pcode, subpcode;
2590 u32 alloc_len, len, n;
2591 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2592 unsigned char *cmd = scp->cmnd;
2594 memset(arr, 0, sizeof(arr));
2598 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2599 return check_condition_result;
2601 pcode = cmd[2] & 0x3f;
2602 subpcode = cmd[3] & 0xff;
2603 alloc_len = get_unaligned_be16(cmd + 7);
2605 if (0 == subpcode) {
2607 case 0x0: /* Supported log pages log page */
2609 arr[n++] = 0x0; /* this page */
2610 arr[n++] = 0xd; /* Temperature */
2611 arr[n++] = 0x2f; /* Informational exceptions */
2614 case 0xd: /* Temperature log page */
2615 arr[3] = resp_temp_l_pg(arr + 4);
2617 case 0x2f: /* Informational exceptions log page */
2618 arr[3] = resp_ie_l_pg(arr + 4);
2621 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2622 return check_condition_result;
2624 } else if (0xff == subpcode) {
2628 case 0x0: /* Supported log pages and subpages log page */
2631 arr[n++] = 0x0; /* 0,0 page */
2633 arr[n++] = 0xff; /* this page */
2635 arr[n++] = 0x0; /* Temperature */
2637 arr[n++] = 0x0; /* Informational exceptions */
2640 case 0xd: /* Temperature subpages */
2643 arr[n++] = 0x0; /* Temperature */
2646 case 0x2f: /* Informational exceptions subpages */
2649 arr[n++] = 0x0; /* Informational exceptions */
2653 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2654 return check_condition_result;
2657 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2658 return check_condition_result;
2660 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2661 return fill_from_dev_buffer(scp, arr,
2662 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2665 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2667 return devip->nr_zones != 0;
2670 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2671 unsigned long long lba)
2673 return &devip->zstate[lba >> devip->zsize_shift];
2676 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2678 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2681 static void zbc_close_zone(struct sdebug_dev_info *devip,
2682 struct sdeb_zone_state *zsp)
2684 enum sdebug_z_cond zc;
2686 if (zbc_zone_is_conv(zsp))
2690 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2693 if (zc == ZC2_IMPLICIT_OPEN)
2694 devip->nr_imp_open--;
2696 devip->nr_exp_open--;
2698 if (zsp->z_wp == zsp->z_start) {
2699 zsp->z_cond = ZC1_EMPTY;
2701 zsp->z_cond = ZC4_CLOSED;
2706 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2708 struct sdeb_zone_state *zsp = &devip->zstate[0];
2711 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2712 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2713 zbc_close_zone(devip, zsp);
2719 static void zbc_open_zone(struct sdebug_dev_info *devip,
2720 struct sdeb_zone_state *zsp, bool explicit)
2722 enum sdebug_z_cond zc;
2724 if (zbc_zone_is_conv(zsp))
2728 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2729 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2732 /* Close an implicit open zone if necessary */
2733 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2734 zbc_close_zone(devip, zsp);
2735 else if (devip->max_open &&
2736 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2737 zbc_close_imp_open_zone(devip);
2739 if (zsp->z_cond == ZC4_CLOSED)
2742 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2743 devip->nr_exp_open++;
2745 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2746 devip->nr_imp_open++;
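/*
 * Sketch of the zone condition transitions driven by the helpers above
 * for a sequential-write-required zone (explicit opens come from the ZBC
 * zone management commands handled elsewhere in this driver):
 *
 *   EMPTY --write--> IMPLICIT OPEN --write reaches zone end--> FULL
 *   EMPTY/CLOSED --open--> EXPLICIT OPEN --close--> CLOSED (or EMPTY
 *   again if the write pointer is still at the zone start)
 *
 * zbc_close_imp_open_zone() is the eviction path used when opening one
 * more zone would exceed devip->max_open.
 */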
2750 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2751 struct sdeb_zone_state *zsp)
2753 switch (zsp->z_cond) {
2754 case ZC2_IMPLICIT_OPEN:
2755 devip->nr_imp_open--;
2757 case ZC3_EXPLICIT_OPEN:
2758 devip->nr_exp_open--;
2761 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2762 zsp->z_start, zsp->z_cond);
2765 zsp->z_cond = ZC5_FULL;
2768 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2769 unsigned long long lba, unsigned int num)
2771 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2772 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2774 if (zbc_zone_is_conv(zsp))
2777 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2779 if (zsp->z_wp >= zend)
2780 zbc_set_zone_full(devip, zsp);
2785 if (lba != zsp->z_wp)
2786 zsp->z_non_seq_resource = true;
2792 } else if (end > zsp->z_wp) {
2798 if (zsp->z_wp >= zend)
2799 zbc_set_zone_full(devip, zsp);
2805 zend = zsp->z_start + zsp->z_size;
2810 static int check_zbc_access_params(struct scsi_cmnd *scp,
2811 unsigned long long lba, unsigned int num, bool write)
2813 struct scsi_device *sdp = scp->device;
2814 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2815 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2816 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2819 if (devip->zmodel == BLK_ZONED_HA)
2821 /* For host-managed, reads cannot cross zone type boundaries */
2822 if (zsp_end != zsp &&
2823 zbc_zone_is_conv(zsp) &&
2824 !zbc_zone_is_conv(zsp_end)) {
2825 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2828 return check_condition_result;
2833 /* No restrictions for writes within conventional zones */
2834 if (zbc_zone_is_conv(zsp)) {
2835 if (!zbc_zone_is_conv(zsp_end)) {
2836 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2838 WRITE_BOUNDARY_ASCQ);
2839 return check_condition_result;
2844 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2845 /* Writes cannot cross sequential zone boundaries */
2846 if (zsp_end != zsp) {
2847 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2849 WRITE_BOUNDARY_ASCQ);
2850 return check_condition_result;
2852 /* Cannot write full zones */
2853 if (zsp->z_cond == ZC5_FULL) {
2854 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2855 INVALID_FIELD_IN_CDB, 0);
2856 return check_condition_result;
2858 /* Writes must be aligned to the zone WP */
2859 if (lba != zsp->z_wp) {
2860 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2862 UNALIGNED_WRITE_ASCQ);
2863 return check_condition_result;
2867 /* Handle implicit open of closed and empty zones */
2868 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2869 if (devip->max_open &&
2870 devip->nr_exp_open >= devip->max_open) {
2871 mk_sense_buffer(scp, DATA_PROTECT,
2874 return check_condition_result;
2876 zbc_open_zone(devip, zsp, false);
2882 static inline int check_device_access_params
2883 (struct scsi_cmnd *scp, unsigned long long lba,
2884 unsigned int num, bool write)
2886 struct scsi_device *sdp = scp->device;
2887 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2889 if (lba + num > sdebug_capacity) {
2890 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2891 return check_condition_result;
2893 /* transfer length excessive (tie in to block limits VPD page) */
2894 if (num > sdebug_store_sectors) {
2895 /* needs work to find which cdb byte 'num' comes from */
2896 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2897 return check_condition_result;
2899 if (write && unlikely(sdebug_wp)) {
2900 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2901 return check_condition_result;
2903 if (sdebug_dev_is_zoned(devip))
2904 return check_zbc_access_params(scp, lba, num, write);
2910 /* Note: if BUG_ON() fires it usually indicates a problem with the parser
2911 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2912 * that access any of the "stores" in struct sdeb_store_info should call this
2913 * function with bug_if_fake_rw set to true. */
2915 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2916 bool bug_if_fake_rw)
2918 if (sdebug_fake_rw) {
2919 BUG_ON(bug_if_fake_rw); /* See note above */
2922 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2925 /* Returns number of bytes copied or -1 if error. */
2926 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2927 u32 sg_skip, u64 lba, u32 num, bool do_write)
2930 u64 block, rest = 0;
2931 enum dma_data_direction dir;
2932 struct scsi_data_buffer *sdb = &scp->sdb;
2936 dir = DMA_TO_DEVICE;
2937 write_since_sync = true;
2939 dir = DMA_FROM_DEVICE;
2942 if (!sdb->length || !sip)
2944 if (scp->sc_data_direction != dir)
2948 block = do_div(lba, sdebug_store_sectors);
2949 if (block + num > sdebug_store_sectors)
2950 rest = block + num - sdebug_store_sectors;
2952 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2953 fsp + (block * sdebug_sector_size),
2954 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2955 if (ret != (num - rest) * sdebug_sector_size)
2959 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2960 fsp, rest * sdebug_sector_size,
2961 sg_skip + ((num - rest) * sdebug_sector_size),
2968 /* Returns number of bytes copied or -1 if error. */
2969 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2971 struct scsi_data_buffer *sdb = &scp->sdb;
2975 if (scp->sc_data_direction != DMA_TO_DEVICE)
2977 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2978 num * sdebug_sector_size, 0, true);
2981 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2982 * arr into sip->storep+lba and return true. If comparison fails then return false. */
2984 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2985 const u8 *arr, bool compare_only)
2988 u64 block, rest = 0;
2989 u32 store_blks = sdebug_store_sectors;
2990 u32 lb_size = sdebug_sector_size;
2991 u8 *fsp = sip->storep;
2993 block = do_div(lba, store_blks);
2994 if (block + num > store_blks)
2995 rest = block + num - store_blks;
2997 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3001 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3007 arr += num * lb_size;
3008 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3010 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
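/*
 * Example: for COMPARE AND WRITE with NUMBER OF LOGICAL BLOCKS = 1 and
 * 512 byte sectors, resp_comp_write() fetches 1024 bytes of data-out;
 * the helper above compares bytes 0..511 against the store and, only if
 * they match, writes bytes 512..1023 back at the same LBA.
 */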
3014 static __be16 dif_compute_csum(const void *buf, int len)
3019 csum = (__force __be16)ip_compute_csum(buf, len);
3021 csum = cpu_to_be16(crc_t10dif(buf, len));
3026 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3027 sector_t sector, u32 ei_lba)
3029 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3031 if (sdt->guard_tag != csum) {
3032 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3033 (unsigned long)sector,
3034 be16_to_cpu(sdt->guard_tag),
3038 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3039 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3040 pr_err("REF check failed on sector %lu\n",
3041 (unsigned long)sector);
3044 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3045 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3046 pr_err("REF check failed on sector %lu\n",
3047 (unsigned long)sector);
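/*
 * For orientation, each 8 byte T10 PI tuple checked above holds a 2 byte
 * guard (CRC16-T10DIF, or an IP checksum when the guard module parameter
 * selects it), a 2 byte application tag and a 4 byte reference tag; with
 * Type 1 protection the reference tag must equal the low 32 bits of the
 * LBA, with Type 2 it must equal the expected initial reference tag
 * (ei_lba) taken from the CDB.
 */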
3053 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3054 unsigned int sectors, bool read)
3058 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3059 scp->device->hostdata, true);
3060 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3061 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3062 struct sg_mapping_iter miter;
3064 /* Bytes of protection data to copy into sgl */
3065 resid = sectors * sizeof(*dif_storep);
3067 sg_miter_start(&miter, scsi_prot_sglist(scp),
3068 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3069 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3071 while (sg_miter_next(&miter) && resid > 0) {
3072 size_t len = min_t(size_t, miter.length, resid);
3073 void *start = dif_store(sip, sector);
3076 if (dif_store_end < start + len)
3077 rest = start + len - dif_store_end;
3082 memcpy(paddr, start, len - rest);
3084 memcpy(start, paddr, len - rest);
3088 memcpy(paddr + len - rest, dif_storep, rest);
3090 memcpy(dif_storep, paddr + len - rest, rest);
3093 sector += len / sizeof(*dif_storep);
3096 sg_miter_stop(&miter);
3099 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3100 unsigned int sectors, u32 ei_lba)
3105 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3106 scp->device->hostdata, true);
3107 struct t10_pi_tuple *sdt;
3109 for (i = 0; i < sectors; i++, ei_lba++) {
3110 sector = start_sec + i;
3111 sdt = dif_store(sip, sector);
3113 if (sdt->app_tag == cpu_to_be16(0xffff))
3117 /* Because scsi_debug acts as both initiator and
3118 * target we proceed to verify the PI even if
3119 * RDPROTECT=3. This is done so the "initiator" knows
3120 * which type of error to return. Otherwise we would
3121 * have to iterate over the PI twice. */
3123 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3124 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3133 dif_copy_prot(scp, start_sec, sectors, true);
3139 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3146 struct sdeb_store_info *sip = devip2sip(devip, true);
3147 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3148 u8 *cmd = scp->cmnd;
3153 lba = get_unaligned_be64(cmd + 2);
3154 num = get_unaligned_be32(cmd + 10);
3159 lba = get_unaligned_be32(cmd + 2);
3160 num = get_unaligned_be16(cmd + 7);
3165 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3166 (u32)(cmd[1] & 0x1f) << 16;
3167 num = (0 == cmd[4]) ? 256 : cmd[4];
3172 lba = get_unaligned_be32(cmd + 2);
3173 num = get_unaligned_be32(cmd + 6);
3176 case XDWRITEREAD_10:
3178 lba = get_unaligned_be32(cmd + 2);
3179 num = get_unaligned_be16(cmd + 7);
3182 default: /* assume READ(32) */
3183 lba = get_unaligned_be64(cmd + 12);
3184 ei_lba = get_unaligned_be32(cmd + 20);
3185 num = get_unaligned_be32(cmd + 28);
3189 if (unlikely(have_dif_prot && check_prot)) {
3190 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3192 mk_sense_invalid_opcode(scp);
3193 return check_condition_result;
3195 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3196 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3197 (cmd[1] & 0xe0) == 0)
3198 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3201 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3202 atomic_read(&sdeb_inject_pending))) {
3204 atomic_set(&sdeb_inject_pending, 0);
3207 ret = check_device_access_params(scp, lba, num, false);
3210 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3211 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3212 ((lba + num) > sdebug_medium_error_start))) {
3213 /* claim unrecoverable read error */
3214 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3215 /* set info field and valid bit for fixed descriptor */
3216 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3217 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3218 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3219 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3220 put_unaligned_be32(ret, scp->sense_buffer + 3);
3222 scsi_set_resid(scp, scsi_bufflen(scp));
3223 return check_condition_result;
3226 read_lock(macc_lckp);
3229 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3230 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3231 case 1: /* Guard tag error */
3232 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3233 read_unlock(macc_lckp);
3234 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3235 return check_condition_result;
3236 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3237 read_unlock(macc_lckp);
3238 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3239 return illegal_condition_result;
3242 case 3: /* Reference tag error */
3243 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3244 read_unlock(macc_lckp);
3245 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3246 return check_condition_result;
3247 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3248 read_unlock(macc_lckp);
3249 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3250 return illegal_condition_result;
3256 ret = do_device_access(sip, scp, 0, lba, num, false);
3257 read_unlock(macc_lckp);
3258 if (unlikely(ret == -1))
3259 return DID_ERROR << 16;
3261 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3263 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3264 atomic_read(&sdeb_inject_pending))) {
3265 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3266 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3267 atomic_set(&sdeb_inject_pending, 0);
3268 return check_condition_result;
3269 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3270 /* Logical block guard check failed */
3271 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3272 atomic_set(&sdeb_inject_pending, 0);
3273 return illegal_condition_result;
3274 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3275 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3276 atomic_set(&sdeb_inject_pending, 0);
3277 return illegal_condition_result;
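/*
 * Example of the medium error injection window used by resp_read_dt0()
 * above (assuming the usual OPT_MEDIUM_ERR_ADDR of 0x1234, i.e. LBA
 * 4660): while SDEBUG_OPT_MEDIUM_ERR is set in the opts parameter, any
 * READ overlapping [medium_error_start, medium_error_start + count)
 * fails with MEDIUM ERROR / UNRECOVERED READ ERROR, and for fixed
 * format sense the information field carries the failing LBA (never
 * less than OPT_MEDIUM_ERR_ADDR).
 */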
3283 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3284 unsigned int sectors, u32 ei_lba)
3287 struct t10_pi_tuple *sdt;
3289 sector_t sector = start_sec;
3292 struct sg_mapping_iter diter;
3293 struct sg_mapping_iter piter;
3295 BUG_ON(scsi_sg_count(SCpnt) == 0);
3296 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3298 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3299 scsi_prot_sg_count(SCpnt),
3300 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3301 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3302 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3304 /* For each protection page */
3305 while (sg_miter_next(&piter)) {
3307 if (WARN_ON(!sg_miter_next(&diter))) {
3312 for (ppage_offset = 0; ppage_offset < piter.length;
3313 ppage_offset += sizeof(struct t10_pi_tuple)) {
3314 /* If we're at the end of the current
3315 * data page advance to the next one */
3317 if (dpage_offset >= diter.length) {
3318 if (WARN_ON(!sg_miter_next(&diter))) {
3325 sdt = piter.addr + ppage_offset;
3326 daddr = diter.addr + dpage_offset;
3328 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3329 ret = dif_verify(sdt, daddr, sector, ei_lba);
3336 dpage_offset += sdebug_sector_size;
3338 diter.consumed = dpage_offset;
3339 sg_miter_stop(&diter);
3341 sg_miter_stop(&piter);
3343 dif_copy_prot(SCpnt, start_sec, sectors, false);
3350 sg_miter_stop(&diter);
3351 sg_miter_stop(&piter);
3355 static unsigned long lba_to_map_index(sector_t lba)
3357 if (sdebug_unmap_alignment)
3358 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3359 sector_div(lba, sdebug_unmap_granularity);
3363 static sector_t map_index_to_lba(unsigned long index)
3365 sector_t lba = index * sdebug_unmap_granularity;
3367 if (sdebug_unmap_alignment)
3368 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
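/*
 * Worked example of the two mappings above, assuming unmap_granularity=8
 * and unmap_alignment=2:
 *
 *   lba_to_map_index(13) = (13 + 8 - 2) / 8 = 2
 *   map_index_to_lba(2)  = 2 * 8 - (8 - 2)  = 10
 *
 * so bit 2 of the provisioning map covers LBAs 10..17.
 */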
3372 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3376 unsigned int mapped;
3377 unsigned long index;
3380 index = lba_to_map_index(lba);
3381 mapped = test_bit(index, sip->map_storep);
3384 next = find_next_zero_bit(sip->map_storep, map_size, index);
3386 next = find_next_bit(sip->map_storep, map_size, index);
3388 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3393 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3396 sector_t end = lba + len;
3399 unsigned long index = lba_to_map_index(lba);
3401 if (index < map_size)
3402 set_bit(index, sip->map_storep);
3404 lba = map_index_to_lba(index + 1);
3408 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3411 sector_t end = lba + len;
3412 u8 *fsp = sip->storep;
3415 unsigned long index = lba_to_map_index(lba);
3417 if (lba == map_index_to_lba(index) &&
3418 lba + sdebug_unmap_granularity <= end &&
3420 clear_bit(index, sip->map_storep);
3421 if (sdebug_lbprz) { /* LBPRZ=1: zero fill, LBPRZ=2: 0xff fill */
3422 memset(fsp + lba * sdebug_sector_size,
3423 (sdebug_lbprz & 1) ? 0 : 0xff,
3424 sdebug_sector_size *
3425 sdebug_unmap_granularity);
3427 if (sip->dif_storep) {
3428 memset(sip->dif_storep + lba, 0xff,
3429 sizeof(*sip->dif_storep) *
3430 sdebug_unmap_granularity);
3433 lba = map_index_to_lba(index + 1);
3437 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3444 struct sdeb_store_info *sip = devip2sip(devip, true);
3445 rwlock_t *macc_lckp = &sip->macc_lck;
3446 u8 *cmd = scp->cmnd;
3451 lba = get_unaligned_be64(cmd + 2);
3452 num = get_unaligned_be32(cmd + 10);
3457 lba = get_unaligned_be32(cmd + 2);
3458 num = get_unaligned_be16(cmd + 7);
3463 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3464 (u32)(cmd[1] & 0x1f) << 16;
3465 num = (0 == cmd[4]) ? 256 : cmd[4];
3470 lba = get_unaligned_be32(cmd + 2);
3471 num = get_unaligned_be32(cmd + 6);
3474 case 0x53: /* XDWRITEREAD(10) */
3476 lba = get_unaligned_be32(cmd + 2);
3477 num = get_unaligned_be16(cmd + 7);
3480 default: /* assume WRITE(32) */
3481 lba = get_unaligned_be64(cmd + 12);
3482 ei_lba = get_unaligned_be32(cmd + 20);
3483 num = get_unaligned_be32(cmd + 28);
3487 if (unlikely(have_dif_prot && check_prot)) {
3488 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3490 mk_sense_invalid_opcode(scp);
3491 return check_condition_result;
3493 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3494 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3495 (cmd[1] & 0xe0) == 0)
3496 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3500 write_lock(macc_lckp);
3501 ret = check_device_access_params(scp, lba, num, true);
3503 write_unlock(macc_lckp);
3508 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3509 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3510 case 1: /* Guard tag error */
3511 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3512 write_unlock(macc_lckp);
3513 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3514 return illegal_condition_result;
3515 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3516 write_unlock(macc_lckp);
3517 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3518 return check_condition_result;
3521 case 3: /* Reference tag error */
3522 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3523 write_unlock(macc_lckp);
3524 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3525 return illegal_condition_result;
3526 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3527 write_unlock(macc_lckp);
3528 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3529 return check_condition_result;
3535 ret = do_device_access(sip, scp, 0, lba, num, true);
3536 if (unlikely(scsi_debug_lbp()))
3537 map_region(sip, lba, num);
3538 /* If ZBC zone then bump its write pointer */
3539 if (sdebug_dev_is_zoned(devip))
3540 zbc_inc_wp(devip, lba, num);
3541 write_unlock(macc_lckp);
3542 if (unlikely(-1 == ret))
3543 return DID_ERROR << 16;
3544 else if (unlikely(sdebug_verbose &&
3545 (ret < (num * sdebug_sector_size))))
3546 sdev_printk(KERN_INFO, scp->device,
3547 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3548 my_name, num * sdebug_sector_size, ret);
3550 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3551 atomic_read(&sdeb_inject_pending))) {
3552 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3553 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3554 atomic_set(&sdeb_inject_pending, 0);
3555 return check_condition_result;
3556 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3557 /* Logical block guard check failed */
3558 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3559 atomic_set(&sdeb_inject_pending, 0);
3560 return illegal_condition_result;
3561 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3562 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3563 atomic_set(&sdeb_inject_pending, 0);
3564 return illegal_condition_result;
3571 /* T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3572 * No READ GATHERED yet (requires bidi or long cdb holding gather list). */
3574 static int resp_write_scat(struct scsi_cmnd *scp,
3575 struct sdebug_dev_info *devip)
3577 u8 *cmd = scp->cmnd;
3580 struct sdeb_store_info *sip = devip2sip(devip, true);
3581 rwlock_t *macc_lckp = &sip->macc_lck;
3583 u16 lbdof, num_lrd, k;
3584 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3585 u32 lb_size = sdebug_sector_size;
3590 static const u32 lrd_size = 32; /* + parameter list header size */
3592 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3594 wrprotect = (cmd[10] >> 5) & 0x7;
3595 lbdof = get_unaligned_be16(cmd + 12);
3596 num_lrd = get_unaligned_be16(cmd + 16);
3597 bt_len = get_unaligned_be32(cmd + 28);
3598 } else { /* that leaves WRITE SCATTERED(16) */
3600 wrprotect = (cmd[2] >> 5) & 0x7;
3601 lbdof = get_unaligned_be16(cmd + 4);
3602 num_lrd = get_unaligned_be16(cmd + 8);
3603 bt_len = get_unaligned_be32(cmd + 10);
3604 if (unlikely(have_dif_prot)) {
3605 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3607 mk_sense_invalid_opcode(scp);
3608 return illegal_condition_result;
3610 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3611 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3613 sdev_printk(KERN_ERR, scp->device,
3614 "Unprotected WR to DIF device\n");
3617 if ((num_lrd == 0) || (bt_len == 0))
3618 return 0; /* T10 says these do-nothings are not errors */
3621 sdev_printk(KERN_INFO, scp->device,
3622 "%s: %s: LB Data Offset field bad\n",
3624 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3625 return illegal_condition_result;
3627 lbdof_blen = lbdof * lb_size;
3628 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3630 sdev_printk(KERN_INFO, scp->device,
3631 "%s: %s: LBA range descriptors don't fit\n",
3633 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3634 return illegal_condition_result;
3636 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3638 return SCSI_MLQUEUE_HOST_BUSY;
3640 sdev_printk(KERN_INFO, scp->device,
3641 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3642 my_name, __func__, lbdof_blen);
3643 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3645 ret = DID_ERROR << 16;
3649 write_lock(macc_lckp);
3650 sg_off = lbdof_blen;
3651 /* Spec says the Buffer Transfer Length field counts logical blocks in the data-out */
3653 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3654 lba = get_unaligned_be64(up + 0);
3655 num = get_unaligned_be32(up + 8);
3657 sdev_printk(KERN_INFO, scp->device,
3658 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3659 my_name, __func__, k, lba, num, sg_off);
3662 ret = check_device_access_params(scp, lba, num, true);
3664 goto err_out_unlock;
3665 num_by = num * lb_size;
3666 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3668 if ((cum_lb + num) > bt_len) {
3670 sdev_printk(KERN_INFO, scp->device,
3671 "%s: %s: sum of blocks > data provided\n",
3673 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3675 ret = illegal_condition_result;
3676 goto err_out_unlock;
3680 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3681 int prot_ret = prot_verify_write(scp, lba, num,
3685 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3687 ret = illegal_condition_result;
3688 goto err_out_unlock;
3692 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3693 /* If ZBC zone then bump its write pointer */
3694 if (sdebug_dev_is_zoned(devip))
3695 zbc_inc_wp(devip, lba, num);
3696 if (unlikely(scsi_debug_lbp()))
3697 map_region(sip, lba, num);
3698 if (unlikely(-1 == ret)) {
3699 ret = DID_ERROR << 16;
3700 goto err_out_unlock;
3701 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3702 sdev_printk(KERN_INFO, scp->device,
3703 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3704 my_name, num_by, ret);
3706 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3707 atomic_read(&sdeb_inject_pending))) {
3708 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3709 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3710 atomic_set(&sdeb_inject_pending, 0);
3711 ret = check_condition_result;
3712 goto err_out_unlock;
3713 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3714 /* Logical block guard check failed */
3715 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3716 atomic_set(&sdeb_inject_pending, 0);
3717 ret = illegal_condition_result;
3718 goto err_out_unlock;
3719 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3720 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3721 atomic_set(&sdeb_inject_pending, 0);
3722 ret = illegal_condition_result;
3723 goto err_out_unlock;
3731 write_unlock(macc_lckp);
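/*
 * Sketch of the parameter data the loop above walks: the first
 * lbdof * block-size bytes of the data-out hold a 32 byte header
 * followed by num_lrd LBA range descriptors of 32 bytes each (an 8 byte
 * LBA, a 4 byte NUMBER OF LOGICAL BLOCKS and, for the 32 byte CDB form,
 * a 4 byte expected initial reference tag at offset 12); the logical
 * blocks to be written then follow back to back starting at byte offset
 * lbdof * block-size (sg_off).
 */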
3737 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3738 u32 ei_lba, bool unmap, bool ndob)
3740 struct scsi_device *sdp = scp->device;
3741 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3742 unsigned long long i;
3744 u32 lb_size = sdebug_sector_size;
3746 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3747 scp->device->hostdata, true);
3748 rwlock_t *macc_lckp = &sip->macc_lck;
3752 write_lock(macc_lckp);
3754 ret = check_device_access_params(scp, lba, num, true);
3756 write_unlock(macc_lckp);
3760 if (unmap && scsi_debug_lbp()) {
3761 unmap_region(sip, lba, num);
3765 block = do_div(lbaa, sdebug_store_sectors);
3766 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3768 fs1p = fsp + (block * lb_size);
3770 memset(fs1p, 0, lb_size);
3773 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3776 write_unlock(&sip->macc_lck);
3777 return DID_ERROR << 16;
3778 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3779 sdev_printk(KERN_INFO, scp->device,
3780 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3781 my_name, "write same", lb_size, ret);
3783 /* Copy first sector to remaining blocks */
3784 for (i = 1 ; i < num ; i++) {
3786 block = do_div(lbaa, sdebug_store_sectors);
3787 memmove(fsp + (block * lb_size), fs1p, lb_size);
3789 if (scsi_debug_lbp())
3790 map_region(sip, lba, num);
3791 /* If ZBC zone then bump its write pointer */
3792 if (sdebug_dev_is_zoned(devip))
3793 zbc_inc_wp(devip, lba, num);
3795 write_unlock(macc_lckp);
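/*
 * Example: WRITE SAME(16) with UNMAP=1 on a logical block provisioning
 * enabled configuration takes the unmap_region() path above, so the
 * range later reads back as zeroes (lbprz=1) or 0xff bytes (lbprz=2);
 * without UNMAP the single fetched block (or an all-zeroes block when
 * NDOB=1) is replicated across the whole range and map_region() marks
 * it mapped again.
 */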
3800 static int resp_write_same_10(struct scsi_cmnd *scp,
3801 struct sdebug_dev_info *devip)
3803 u8 *cmd = scp->cmnd;
3810 if (sdebug_lbpws10 == 0) {
3811 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3812 return check_condition_result;
3816 lba = get_unaligned_be32(cmd + 2);
3817 num = get_unaligned_be16(cmd + 7);
3818 if (num > sdebug_write_same_length) {
3819 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3820 return check_condition_result;
3822 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3825 static int resp_write_same_16(struct scsi_cmnd *scp,
3826 struct sdebug_dev_info *devip)
3828 u8 *cmd = scp->cmnd;
3835 if (cmd[1] & 0x8) { /* UNMAP */
3836 if (sdebug_lbpws == 0) {
3837 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3838 return check_condition_result;
3842 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3844 lba = get_unaligned_be64(cmd + 2);
3845 num = get_unaligned_be32(cmd + 10);
3846 if (num > sdebug_write_same_length) {
3847 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3848 return check_condition_result;
3850 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3853 /* Note the mode field is in the same position as the (lower) service action
3854 * field. For the Report supported operation codes command, SPC-4 suggests
3855 * each mode of this command should be reported separately; that is left for the future. */
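/* The modes handled below (0x4 to 0x7) are the "download microcode" variants;
 * as emulated here they only raise unit attention conditions (MICROCODE
 * CHANGED, with or without a reset) on this device or on the other LUs in the
 * same target. */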
3856 static int resp_write_buffer(struct scsi_cmnd *scp,
3857 struct sdebug_dev_info *devip)
3859 u8 *cmd = scp->cmnd;
3860 struct scsi_device *sdp = scp->device;
3861 struct sdebug_dev_info *dp;
3864 mode = cmd[1] & 0x1f;
3866 case 0x4: /* download microcode (MC) and activate (ACT) */
3867 /* set UAs on this device only */
3868 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3869 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3871 case 0x5: /* download MC, save and ACT */
3872 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3874 case 0x6: /* download MC with offsets and ACT */
3875 /* set UAs on most devices (LUs) in this target */
3876 list_for_each_entry(dp,
3877 &devip->sdbg_host->dev_info_list,
3879 if (dp->target == sdp->id) {
3880 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3882 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3886 case 0x7: /* download MC with offsets, save, and ACT */
3887 /* set UA on all devices (LUs) in this target */
3888 list_for_each_entry(dp,
3889 &devip->sdbg_host->dev_info_list,
3891 if (dp->target == sdp->id)
3892 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3896 /* do nothing for this command for other mode values */
3902 static int resp_comp_write(struct scsi_cmnd *scp,
3903 struct sdebug_dev_info *devip)
3905 u8 *cmd = scp->cmnd;
3907 struct sdeb_store_info *sip = devip2sip(devip, true);
3908 rwlock_t *macc_lckp = &sip->macc_lck;
3911 u32 lb_size = sdebug_sector_size;
3916 lba = get_unaligned_be64(cmd + 2);
3917 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3919 return 0; /* degenerate case, not an error */
3920 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3922 mk_sense_invalid_opcode(scp);
3923 return check_condition_result;
3925 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3926 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3927 (cmd[1] & 0xe0) == 0)
3928 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3930 ret = check_device_access_params(scp, lba, num, false);
3934 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3936 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3938 return check_condition_result;
3941 write_lock(macc_lckp);
3943 ret = do_dout_fetch(scp, dnum, arr);
3945 retval = DID_ERROR << 16;
3947 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3948 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3949 "indicated=%u, IO sent=%d bytes\n", my_name,
3950 dnum * lb_size, ret);
3951 if (!comp_write_worker(sip, lba, num, arr, false)) {
3952 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3953 retval = check_condition_result;
3956 if (scsi_debug_lbp())
3957 map_region(sip, lba, num);
3959 write_unlock(macc_lckp);
3964 struct unmap_block_desc {
3970 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3973 struct unmap_block_desc *desc;
3974 struct sdeb_store_info *sip = devip2sip(devip, true);
3975 rwlock_t *macc_lckp = &sip->macc_lck;
3976 unsigned int i, payload_len, descriptors;
3979 if (!scsi_debug_lbp())
3980 return 0; /* fib and say it's done */
3981 payload_len = get_unaligned_be16(scp->cmnd + 7);
3982 BUG_ON(scsi_bufflen(scp) != payload_len);
3984 descriptors = (payload_len - 8) / 16;
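/* Each block descriptor is 16 bytes (see struct unmap_block_desc above) and
 * follows an 8 byte parameter list header, hence the arithmetic above. */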
3985 if (descriptors > sdebug_unmap_max_desc) {
3986 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3987 return check_condition_result;
3990 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3992 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3994 return check_condition_result;
3997 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3999 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4000 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4002 desc = (void *)&buf[8];
4004 write_lock(macc_lckp);
4006 for (i = 0 ; i < descriptors ; i++) {
4007 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4008 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4010 ret = check_device_access_params(scp, lba, num, true);
4014 unmap_region(sip, lba, num);
4020 write_unlock(macc_lckp);
4026 #define SDEBUG_GET_LBA_STATUS_LEN 32
4028 static int resp_get_lba_status(struct scsi_cmnd *scp,
4029 struct sdebug_dev_info *devip)
4031 u8 *cmd = scp->cmnd;
4033 u32 alloc_len, mapped, num;
4035 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4037 lba = get_unaligned_be64(cmd + 2);
4038 alloc_len = get_unaligned_be32(cmd + 10);
4043 ret = check_device_access_params(scp, lba, 1, false);
4047 if (scsi_debug_lbp()) {
4048 struct sdeb_store_info *sip = devip2sip(devip, true);
4050 mapped = map_state(sip, lba, &num);
4053 /* following just in case virtual_gb changed */
4054 sdebug_capacity = get_sdebug_capacity();
4055 if (sdebug_capacity - lba <= 0xffffffff)
4056 num = sdebug_capacity - lba;
4061 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4062 put_unaligned_be32(20, arr); /* Parameter Data Length */
4063 put_unaligned_be64(lba, arr + 8); /* LBA */
4064 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4065 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4067 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4070 static int resp_sync_cache(struct scsi_cmnd *scp,
4071 struct sdebug_dev_info *devip)
4076 u8 *cmd = scp->cmnd;
4078 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4079 lba = get_unaligned_be32(cmd + 2);
4080 num_blocks = get_unaligned_be16(cmd + 7);
4081 } else { /* SYNCHRONIZE_CACHE(16) */
4082 lba = get_unaligned_be64(cmd + 2);
4083 num_blocks = get_unaligned_be32(cmd + 10);
4085 if (lba + num_blocks > sdebug_capacity) {
4086 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4087 return check_condition_result;
4089 if (!write_since_sync || (cmd[1] & 0x2))
4090 res = SDEG_RES_IMMED_MASK;
4091 else /* delay if write_since_sync and IMMED clear */
4092 write_since_sync = false;
4097 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4098 * CONDITION MET if the specified blocks will/have fitted in the cache, and
4099 * a GOOD status otherwise. Since this driver models a disk with a big cache,
4100 * it always yields CONDITION MET. It also tries to bring the addressed range
4101 * of the backing store into the cache(s) associated with the CPU(s).
4103 static int resp_pre_fetch(struct scsi_cmnd *scp,
4104 struct sdebug_dev_info *devip)
4108 u64 block, rest = 0;
4110 u8 *cmd = scp->cmnd;
4111 struct sdeb_store_info *sip = devip2sip(devip, true);
4112 rwlock_t *macc_lckp = &sip->macc_lck;
4113 u8 *fsp = sip->storep;
4115 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4116 lba = get_unaligned_be32(cmd + 2);
4117 nblks = get_unaligned_be16(cmd + 7);
4118 } else { /* PRE-FETCH(16) */
4119 lba = get_unaligned_be64(cmd + 2);
4120 nblks = get_unaligned_be32(cmd + 10);
4122 if (lba + nblks > sdebug_capacity) {
4123 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4124 return check_condition_result;
4128 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4129 block = do_div(lba, sdebug_store_sectors);
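/* do_div() leaves the quotient in lba and returns the remainder, so block is
 * the starting offset within the (possibly smaller) backing store; any part
 * of the range that runs past the end of the store is handled via rest. */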
4130 if (block + nblks > sdebug_store_sectors)
4131 rest = block + nblks - sdebug_store_sectors;
4133 /* Try to bring the PRE-FETCH range into CPU's cache */
4134 read_lock(macc_lckp);
4135 prefetch_range(fsp + (sdebug_sector_size * block),
4136 (nblks - rest) * sdebug_sector_size);
4138 prefetch_range(fsp, rest * sdebug_sector_size);
4139 read_unlock(macc_lckp);
4142 res = SDEG_RES_IMMED_MASK;
4143 return res | condition_met_result;
4146 #define RL_BUCKET_ELEMS 8
4148 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4149 * (W-LUN), the normal Linux scanning logic does not associate it with a
4150 * device (e.g. /dev/sg7). The following magic will make that association:
4151 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4152 * where <n> is a host number. If there are multiple targets in a host then
4153 * the above will associate a W-LUN with each target. To get a W-LUN only
4154 * for target 2, use "echo '- 2 49409' > scan".
4156 static int resp_report_luns(struct scsi_cmnd *scp,
4157 struct sdebug_dev_info *devip)
4159 unsigned char *cmd = scp->cmnd;
4160 unsigned int alloc_len;
4161 unsigned char select_report;
4163 struct scsi_lun *lun_p;
4164 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4165 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4166 unsigned int wlun_cnt; /* report luns W-LUN count */
4167 unsigned int tlun_cnt; /* total LUN count */
4168 unsigned int rlen; /* response length (in bytes) */
4170 unsigned int off_rsp = 0;
4171 const int sz_lun = sizeof(struct scsi_lun);
4173 clear_luns_changed_on_target(devip);
4175 select_report = cmd[2];
4176 alloc_len = get_unaligned_be32(cmd + 6);
4178 if (alloc_len < 4) {
4179 pr_err("alloc len too small %d\n", alloc_len);
4180 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4181 return check_condition_result;
4184 switch (select_report) {
4185 case 0: /* all LUNs apart from W-LUNs */
4186 lun_cnt = sdebug_max_luns;
4189 case 1: /* only W-LUNs */
4193 case 2: /* all LUNs */
4194 lun_cnt = sdebug_max_luns;
4197 case 0x10: /* only administrative LUs */
4198 case 0x11: /* see SPC-5 */
4199 case 0x12: /* only subsidiary LUs owned by referenced LU */
4201 pr_debug("select report invalid %d\n", select_report);
4202 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4203 return check_condition_result;
4206 if (sdebug_no_lun_0 && (lun_cnt > 0))
4209 tlun_cnt = lun_cnt + wlun_cnt;
4210 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4211 scsi_set_resid(scp, scsi_bufflen(scp));
4212 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4213 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4215 /* the loops below rely on the response header being the same size as a LUN entry (both 8 bytes) */
4216 lun = sdebug_no_lun_0 ? 1 : 0;
4217 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4218 memset(arr, 0, sizeof(arr));
4219 lun_p = (struct scsi_lun *)&arr[0];
4221 put_unaligned_be32(rlen, &arr[0]);
4225 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4226 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4228 int_to_scsilun(lun++, lun_p);
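/* SAM encodes the LUN address method in the top two bits of byte 0;
 * setting 0x40 below selects the flat space addressing method. */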
4229 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4230 lun_p->scsi_lun[0] |= 0x40;
4232 if (j < RL_BUCKET_ELEMS)
4235 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4241 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4245 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4249 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4251 bool is_bytchk3 = false;
4254 u32 vnum, a_num, off;
4255 const u32 lb_size = sdebug_sector_size;
4258 u8 *cmd = scp->cmnd;
4259 struct sdeb_store_info *sip = devip2sip(devip, true);
4260 rwlock_t *macc_lckp = &sip->macc_lck;
4262 bytchk = (cmd[1] >> 1) & 0x3;
4264 return 0; /* always claim internal verify okay */
4265 } else if (bytchk == 2) {
4266 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4267 return check_condition_result;
4268 } else if (bytchk == 3) {
4269 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4273 lba = get_unaligned_be64(cmd + 2);
4274 vnum = get_unaligned_be32(cmd + 10);
4276 case VERIFY: /* is VERIFY(10) */
4277 lba = get_unaligned_be32(cmd + 2);
4278 vnum = get_unaligned_be16(cmd + 7);
4281 mk_sense_invalid_opcode(scp);
4282 return check_condition_result;
4285 return 0; /* not an error */
4286 a_num = is_bytchk3 ? 1 : vnum;
4287 /* Treat following check like one for read (i.e. no write) access */
4288 ret = check_device_access_params(scp, lba, a_num, false);
4292 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4294 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4296 return check_condition_result;
4298 /* Not changing store, so only need read access */
4299 read_lock(macc_lckp);
4301 ret = do_dout_fetch(scp, a_num, arr);
4303 ret = DID_ERROR << 16;
4305 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4306 sdev_printk(KERN_INFO, scp->device,
4307 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4308 my_name, __func__, a_num * lb_size, ret);
4311 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4312 memcpy(arr + off, arr, lb_size);
4315 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4316 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4317 ret = check_condition_result;
4321 read_unlock(macc_lckp);
4326 #define RZONES_DESC_HD 64
4328 /* Report zones depending on start LBA and reporting options */
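/* The reporting options field (CDB byte 14) selects which zone conditions are
 * returned: all zones, or only those that are empty, implicitly open,
 * explicitly open, closed, full, marked non-sequential-resource, or
 * conventional; unsupported values yield INVALID FIELD IN CDB. */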
4329 static int resp_report_zones(struct scsi_cmnd *scp,
4330 struct sdebug_dev_info *devip)
4332 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4334 u32 alloc_len, rep_opts, rep_len;
4337 u8 *arr = NULL, *desc;
4338 u8 *cmd = scp->cmnd;
4339 struct sdeb_zone_state *zsp;
4340 struct sdeb_store_info *sip = devip2sip(devip, false);
4341 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4343 if (!sdebug_dev_is_zoned(devip)) {
4344 mk_sense_invalid_opcode(scp);
4345 return check_condition_result;
4347 zs_lba = get_unaligned_be64(cmd + 2);
4348 alloc_len = get_unaligned_be32(cmd + 10);
4350 return 0; /* not an error */
4351 rep_opts = cmd[14] & 0x3f;
4352 partial = cmd[14] & 0x80;
4354 if (zs_lba >= sdebug_capacity) {
4355 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4356 return check_condition_result;
4359 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
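/* The REPORT ZONES response is a 64 byte header followed by one 64 byte
 * descriptor per zone, so the allocation length bounds how many descriptors
 * can be returned (rep_max_zones below). */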
4360 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4363 arr = kzalloc(alloc_len, GFP_ATOMIC);
4365 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4367 return check_condition_result;
4370 read_lock(macc_lckp);
4373 for (i = 0; i < max_zones; i++) {
4374 lba = zs_lba + devip->zsize * i;
4375 if (lba > sdebug_capacity)
4377 zsp = zbc_zone(devip, lba);
4384 if (zsp->z_cond != ZC1_EMPTY)
4388 /* Implicit open zones */
4389 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4393 /* Explicit open zones */
4394 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4399 if (zsp->z_cond != ZC4_CLOSED)
4404 if (zsp->z_cond != ZC5_FULL)
4411 * Read-only, offline, reset WP recommended are
4412 * not emulated: no zones to report;
4416 /* non-seq-resource set */
4417 if (!zsp->z_non_seq_resource)
4421 /* Not write pointer (conventional) zones */
4422 if (!zbc_zone_is_conv(zsp))
4426 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4427 INVALID_FIELD_IN_CDB, 0);
4428 ret = check_condition_result;
4432 if (nrz < rep_max_zones) {
4433 /* Fill zone descriptor */
4434 desc[0] = zsp->z_type;
4435 desc[1] = zsp->z_cond << 4;
4436 if (zsp->z_non_seq_resource)
4438 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4439 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4440 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4444 if (partial && nrz >= rep_max_zones)
4451 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4452 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4454 rep_len = (unsigned long)desc - (unsigned long)arr;
4455 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4458 read_unlock(macc_lckp);
4463 /* Logic transplanted from tcmu-runner, file_zbc.c */
4464 static void zbc_open_all(struct sdebug_dev_info *devip)
4466 struct sdeb_zone_state *zsp = &devip->zstate[0];
4469 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4470 if (zsp->z_cond == ZC4_CLOSED)
4471 zbc_open_zone(devip, &devip->zstate[i], true);
4475 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4479 enum sdebug_z_cond zc;
4480 u8 *cmd = scp->cmnd;
4481 struct sdeb_zone_state *zsp;
4482 bool all = cmd[14] & 0x01;
4483 struct sdeb_store_info *sip = devip2sip(devip, false);
4484 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4486 if (!sdebug_dev_is_zoned(devip)) {
4487 mk_sense_invalid_opcode(scp);
4488 return check_condition_result;
4491 write_lock(macc_lckp);
4494 /* Check if all closed zones can be opened */
4495 if (devip->max_open &&
4496 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4497 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4499 res = check_condition_result;
4502 /* Open all closed zones */
4503 zbc_open_all(devip);
4507 /* Open the specified zone */
4508 z_id = get_unaligned_be64(cmd + 2);
4509 if (z_id >= sdebug_capacity) {
4510 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4511 res = check_condition_result;
4515 zsp = zbc_zone(devip, z_id);
4516 if (z_id != zsp->z_start) {
4517 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4518 res = check_condition_result;
4521 if (zbc_zone_is_conv(zsp)) {
4522 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4523 res = check_condition_result;
4528 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4531 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4532 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4534 res = check_condition_result;
4538 zbc_open_zone(devip, zsp, true);
4540 write_unlock(macc_lckp);
4544 static void zbc_close_all(struct sdebug_dev_info *devip)
4548 for (i = 0; i < devip->nr_zones; i++)
4549 zbc_close_zone(devip, &devip->zstate[i]);
4552 static int resp_close_zone(struct scsi_cmnd *scp,
4553 struct sdebug_dev_info *devip)
4557 u8 *cmd = scp->cmnd;
4558 struct sdeb_zone_state *zsp;
4559 bool all = cmd[14] & 0x01;
4560 struct sdeb_store_info *sip = devip2sip(devip, false);
4561 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4563 if (!sdebug_dev_is_zoned(devip)) {
4564 mk_sense_invalid_opcode(scp);
4565 return check_condition_result;
4568 write_lock(macc_lckp);
4571 zbc_close_all(devip);
4575 /* Close specified zone */
4576 z_id = get_unaligned_be64(cmd + 2);
4577 if (z_id >= sdebug_capacity) {
4578 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4579 res = check_condition_result;
4583 zsp = zbc_zone(devip, z_id);
4584 if (z_id != zsp->z_start) {
4585 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4586 res = check_condition_result;
4589 if (zbc_zone_is_conv(zsp)) {
4590 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4591 res = check_condition_result;
4595 zbc_close_zone(devip, zsp);
4597 write_unlock(macc_lckp);
4601 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4602 struct sdeb_zone_state *zsp, bool empty)
4604 enum sdebug_z_cond zc = zsp->z_cond;
4606 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4607 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4608 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4609 zbc_close_zone(devip, zsp);
4610 if (zsp->z_cond == ZC4_CLOSED)
4612 zsp->z_wp = zsp->z_start + zsp->z_size;
4613 zsp->z_cond = ZC5_FULL;
4617 static void zbc_finish_all(struct sdebug_dev_info *devip)
4621 for (i = 0; i < devip->nr_zones; i++)
4622 zbc_finish_zone(devip, &devip->zstate[i], false);
4625 static int resp_finish_zone(struct scsi_cmnd *scp,
4626 struct sdebug_dev_info *devip)
4628 struct sdeb_zone_state *zsp;
4631 u8 *cmd = scp->cmnd;
4632 bool all = cmd[14] & 0x01;
4633 struct sdeb_store_info *sip = devip2sip(devip, false);
4634 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4636 if (!sdebug_dev_is_zoned(devip)) {
4637 mk_sense_invalid_opcode(scp);
4638 return check_condition_result;
4641 write_lock(macc_lckp);
4644 zbc_finish_all(devip);
4648 /* Finish the specified zone */
4649 z_id = get_unaligned_be64(cmd + 2);
4650 if (z_id >= sdebug_capacity) {
4651 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4652 res = check_condition_result;
4656 zsp = zbc_zone(devip, z_id);
4657 if (z_id != zsp->z_start) {
4658 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4659 res = check_condition_result;
4662 if (zbc_zone_is_conv(zsp)) {
4663 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4664 res = check_condition_result;
4668 zbc_finish_zone(devip, zsp, true);
4670 write_unlock(macc_lckp);
4674 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4675 struct sdeb_zone_state *zsp)
4677 enum sdebug_z_cond zc;
4678 struct sdeb_store_info *sip = devip2sip(devip, false);
4680 if (zbc_zone_is_conv(zsp))
4684 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4685 zbc_close_zone(devip, zsp);
4687 if (zsp->z_cond == ZC4_CLOSED)
4690 if (zsp->z_wp > zsp->z_start)
4691 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4692 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4694 zsp->z_non_seq_resource = false;
4695 zsp->z_wp = zsp->z_start;
4696 zsp->z_cond = ZC1_EMPTY;
4699 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4703 for (i = 0; i < devip->nr_zones; i++)
4704 zbc_rwp_zone(devip, &devip->zstate[i]);
4707 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4709 struct sdeb_zone_state *zsp;
4712 u8 *cmd = scp->cmnd;
4713 bool all = cmd[14] & 0x01;
4714 struct sdeb_store_info *sip = devip2sip(devip, false);
4715 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4717 if (!sdebug_dev_is_zoned(devip)) {
4718 mk_sense_invalid_opcode(scp);
4719 return check_condition_result;
4722 write_lock(macc_lckp);
4729 z_id = get_unaligned_be64(cmd + 2);
4730 if (z_id >= sdebug_capacity) {
4731 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4732 res = check_condition_result;
4736 zsp = zbc_zone(devip, z_id);
4737 if (z_id != zsp->z_start) {
4738 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4739 res = check_condition_result;
4742 if (zbc_zone_is_conv(zsp)) {
4743 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4744 res = check_condition_result;
4748 zbc_rwp_zone(devip, zsp);
4750 write_unlock(macc_lckp);
4754 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4757 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4759 hwq = blk_mq_unique_tag_to_hwq(tag);
4761 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4762 if (WARN_ON_ONCE(hwq >= submit_queues))
4765 return sdebug_q_arr + hwq;
4768 static u32 get_tag(struct scsi_cmnd *cmnd)
4770 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4773 /* Queued (deferred) command completions converge here. */
4774 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4776 bool aborted = sd_dp->aborted;
4779 unsigned long iflags;
4780 struct sdebug_queue *sqp;
4781 struct sdebug_queued_cmd *sqcp;
4782 struct scsi_cmnd *scp;
4783 struct sdebug_dev_info *devip;
4785 if (unlikely(aborted))
4786 sd_dp->aborted = false;
4787 qc_idx = sd_dp->qc_idx;
4788 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4789 if (sdebug_statistics) {
4790 atomic_inc(&sdebug_completions);
4791 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4792 atomic_inc(&sdebug_miss_cpus);
4794 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4795 pr_err("wild qc_idx=%d\n", qc_idx);
4798 spin_lock_irqsave(&sqp->qc_lock, iflags);
4799 sd_dp->defer_t = SDEB_DEFER_NONE;
4800 sqcp = &sqp->qc_arr[qc_idx];
4802 if (unlikely(scp == NULL)) {
4803 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4804 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4805 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4808 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4810 atomic_dec(&devip->num_in_q);
4812 pr_err("devip=NULL\n");
4813 if (unlikely(atomic_read(&retired_max_queue) > 0))
4816 sqcp->a_cmnd = NULL;
4817 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4818 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4819 pr_err("Unexpected completion\n");
4823 if (unlikely(retiring)) { /* user has reduced max_queue */
4826 retval = atomic_read(&retired_max_queue);
4827 if (qc_idx >= retval) {
4828 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4829 pr_err("index %d too large\n", retval);
4832 k = find_last_bit(sqp->in_use_bm, retval);
4833 if ((k < sdebug_max_queue) || (k == retval))
4834 atomic_set(&retired_max_queue, 0);
4836 atomic_set(&retired_max_queue, k + 1);
4838 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4839 if (unlikely(aborted)) {
4841 pr_info("bypassing scsi_done() due to aborted cmd\n");
4844 scp->scsi_done(scp); /* callback to mid level */
4847 /* When the high resolution timer goes off, this function is called. */
4848 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4850 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4852 sdebug_q_cmd_complete(sd_dp);
4853 return HRTIMER_NORESTART;
4856 /* When the work queue runs the scheduled work, it calls this function. */
4857 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4859 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4861 sdebug_q_cmd_complete(sd_dp);
4864 static bool got_shared_uuid;
4865 static uuid_t shared_uuid;
4867 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4869 struct sdeb_zone_state *zsp;
4870 sector_t capacity = get_sdebug_capacity();
4871 sector_t zstart = 0;
4875 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4876 * a zone size allowing for at least 4 zones on the device. Otherwise,
4877 * use the specified zone size checking that at least 2 zones can be
4878 * created for the device.
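 * For example, a 128 MiB zone with 512 byte logical blocks works out to
 * 262144 blocks; devip->zsize is kept in logical blocks (the MiB value is
 * shifted down by ilog2(sdebug_sector_size)).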
4880 if (!sdeb_zbc_zone_size_mb) {
4881 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4882 >> ilog2(sdebug_sector_size);
4883 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4885 if (devip->zsize < 2) {
4886 pr_err("Device capacity too small\n");
4890 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4891 pr_err("Zone size is not a power of 2\n");
4894 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4895 >> ilog2(sdebug_sector_size);
4896 if (devip->zsize >= capacity) {
4897 pr_err("Zone size too large for device capacity\n");
4902 devip->zsize_shift = ilog2(devip->zsize);
4903 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4905 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4906 pr_err("Number of conventional zones too large\n");
4909 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4911 if (devip->zmodel == BLK_ZONED_HM) {
4912 /* zbc_max_open_zones can be 0, meaning "not reported" */
4913 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4914 devip->max_open = (devip->nr_zones - 1) / 2;
4916 devip->max_open = sdeb_zbc_max_open;
4919 devip->zstate = kcalloc(devip->nr_zones,
4920 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4924 for (i = 0; i < devip->nr_zones; i++) {
4925 zsp = &devip->zstate[i];
4927 zsp->z_start = zstart;
4929 if (i < devip->nr_conv_zones) {
4930 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4931 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4932 zsp->z_wp = (sector_t)-1;
4934 if (devip->zmodel == BLK_ZONED_HM)
4935 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4937 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4938 zsp->z_cond = ZC1_EMPTY;
4939 zsp->z_wp = zsp->z_start;
4942 if (zsp->z_start + devip->zsize < capacity)
4943 zsp->z_size = devip->zsize;
4945 zsp->z_size = capacity - zsp->z_start;
4947 zstart += zsp->z_size;
4953 static struct sdebug_dev_info *sdebug_device_create(
4954 struct sdebug_host_info *sdbg_host, gfp_t flags)
4956 struct sdebug_dev_info *devip;
4958 devip = kzalloc(sizeof(*devip), flags);
4960 if (sdebug_uuid_ctl == 1)
4961 uuid_gen(&devip->lu_name);
4962 else if (sdebug_uuid_ctl == 2) {
4963 if (got_shared_uuid)
4964 devip->lu_name = shared_uuid;
4966 uuid_gen(&shared_uuid);
4967 got_shared_uuid = true;
4968 devip->lu_name = shared_uuid;
4971 devip->sdbg_host = sdbg_host;
4972 if (sdeb_zbc_in_use) {
4973 devip->zmodel = sdeb_zbc_model;
4974 if (sdebug_device_create_zones(devip)) {
4979 devip->zmodel = BLK_ZONED_NONE;
4981 devip->sdbg_host = sdbg_host;
4982 devip->create_ts = ktime_get_boottime();
4983 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4984 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4989 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4991 struct sdebug_host_info *sdbg_host;
4992 struct sdebug_dev_info *open_devip = NULL;
4993 struct sdebug_dev_info *devip;
4995 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4997 pr_err("Host info NULL\n");
5001 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5002 if ((devip->used) && (devip->channel == sdev->channel) &&
5003 (devip->target == sdev->id) &&
5004 (devip->lun == sdev->lun))
5007 if ((!devip->used) && (!open_devip))
5011 if (!open_devip) { /* try and make a new one */
5012 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5014 pr_err("out of memory at line %d\n", __LINE__);
5019 open_devip->channel = sdev->channel;
5020 open_devip->target = sdev->id;
5021 open_devip->lun = sdev->lun;
5022 open_devip->sdbg_host = sdbg_host;
5023 atomic_set(&open_devip->num_in_q, 0);
5024 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5025 open_devip->used = true;
5029 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5032 pr_info("slave_alloc <%u %u %u %llu>\n",
5033 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5037 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5039 struct sdebug_dev_info *devip =
5040 (struct sdebug_dev_info *)sdp->hostdata;
5043 pr_info("slave_configure <%u %u %u %llu>\n",
5044 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5045 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5046 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5047 if (devip == NULL) {
5048 devip = find_build_dev_info(sdp);
5050 return 1; /* no resources, will be marked offline */
5052 sdp->hostdata = devip;
5054 sdp->no_uld_attach = 1;
5055 config_cdb_len(sdp);
5059 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5061 struct sdebug_dev_info *devip =
5062 (struct sdebug_dev_info *)sdp->hostdata;
5065 pr_info("slave_destroy <%u %u %u %llu>\n",
5066 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5068 /* make this slot available for re-use */
5069 devip->used = false;
5070 sdp->hostdata = NULL;
5074 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5075 enum sdeb_defer_type defer_t)
5079 if (defer_t == SDEB_DEFER_HRT)
5080 hrtimer_cancel(&sd_dp->hrt);
5081 else if (defer_t == SDEB_DEFER_WQ)
5082 cancel_work_sync(&sd_dp->ew.work);
5085 /* If @cmnd is found, deletes its timer or work queue and returns true; else returns false. */
5087 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5089 unsigned long iflags;
5090 int j, k, qmax, r_qmax;
5091 enum sdeb_defer_type l_defer_t;
5092 struct sdebug_queue *sqp;
5093 struct sdebug_queued_cmd *sqcp;
5094 struct sdebug_dev_info *devip;
5095 struct sdebug_defer *sd_dp;
5097 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5098 spin_lock_irqsave(&sqp->qc_lock, iflags);
5099 qmax = sdebug_max_queue;
5100 r_qmax = atomic_read(&retired_max_queue);
5103 for (k = 0; k < qmax; ++k) {
5104 if (test_bit(k, sqp->in_use_bm)) {
5105 sqcp = &sqp->qc_arr[k];
5106 if (cmnd != sqcp->a_cmnd)
5109 devip = (struct sdebug_dev_info *)
5110 cmnd->device->hostdata;
5112 atomic_dec(&devip->num_in_q);
5113 sqcp->a_cmnd = NULL;
5114 sd_dp = sqcp->sd_dp;
5116 l_defer_t = sd_dp->defer_t;
5117 sd_dp->defer_t = SDEB_DEFER_NONE;
5119 l_defer_t = SDEB_DEFER_NONE;
5120 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5121 stop_qc_helper(sd_dp, l_defer_t);
5122 clear_bit(k, sqp->in_use_bm);
5126 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5131 /* Deletes (stops) timers or work queues of all queued commands */
5132 static void stop_all_queued(void)
5134 unsigned long iflags;
5136 enum sdeb_defer_type l_defer_t;
5137 struct sdebug_queue *sqp;
5138 struct sdebug_queued_cmd *sqcp;
5139 struct sdebug_dev_info *devip;
5140 struct sdebug_defer *sd_dp;
5142 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5143 spin_lock_irqsave(&sqp->qc_lock, iflags);
5144 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5145 if (test_bit(k, sqp->in_use_bm)) {
5146 sqcp = &sqp->qc_arr[k];
5147 if (sqcp->a_cmnd == NULL)
5149 devip = (struct sdebug_dev_info *)
5150 sqcp->a_cmnd->device->hostdata;
5152 atomic_dec(&devip->num_in_q);
5153 sqcp->a_cmnd = NULL;
5154 sd_dp = sqcp->sd_dp;
5156 l_defer_t = sd_dp->defer_t;
5157 sd_dp->defer_t = SDEB_DEFER_NONE;
5159 l_defer_t = SDEB_DEFER_NONE;
5160 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5161 stop_qc_helper(sd_dp, l_defer_t);
5162 clear_bit(k, sqp->in_use_bm);
5163 spin_lock_irqsave(&sqp->qc_lock, iflags);
5166 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5170 /* Free queued command memory on heap */
5171 static void free_all_queued(void)
5174 struct sdebug_queue *sqp;
5175 struct sdebug_queued_cmd *sqcp;
5177 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5178 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5179 sqcp = &sqp->qc_arr[k];
5186 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5192 ok = stop_queued_cmnd(SCpnt);
5193 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5194 sdev_printk(KERN_INFO, SCpnt->device,
5195 "%s: command%s found\n", __func__,
5201 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5204 if (SCpnt && SCpnt->device) {
5205 struct scsi_device *sdp = SCpnt->device;
5206 struct sdebug_dev_info *devip =
5207 (struct sdebug_dev_info *)sdp->hostdata;
5209 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5210 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5212 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5217 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5219 struct sdebug_host_info *sdbg_host;
5220 struct sdebug_dev_info *devip;
5221 struct scsi_device *sdp;
5222 struct Scsi_Host *hp;
5225 ++num_target_resets;
5228 sdp = SCpnt->device;
5231 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5232 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5236 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5238 list_for_each_entry(devip,
5239 &sdbg_host->dev_info_list,
5241 if (devip->target == sdp->id) {
5242 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5246 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5247 sdev_printk(KERN_INFO, sdp,
5248 "%s: %d device(s) found in target\n", __func__, k);
5253 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5255 struct sdebug_host_info *sdbg_host;
5256 struct sdebug_dev_info *devip;
5257 struct scsi_device *sdp;
5258 struct Scsi_Host *hp;
5262 if (!(SCpnt && SCpnt->device))
5264 sdp = SCpnt->device;
5265 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5266 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5269 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5271 list_for_each_entry(devip,
5272 &sdbg_host->dev_info_list,
5274 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5279 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5280 sdev_printk(KERN_INFO, sdp,
5281 "%s: %d device(s) found in host\n", __func__, k);
5286 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5288 struct sdebug_host_info *sdbg_host;
5289 struct sdebug_dev_info *devip;
5293 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5294 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5295 spin_lock(&sdebug_host_list_lock);
5296 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5297 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5299 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5303 spin_unlock(&sdebug_host_list_lock);
5305 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5306 sdev_printk(KERN_INFO, SCpnt->device,
5307 "%s: %d device(s) found\n", __func__, k);
5311 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5313 struct msdos_partition *pp;
5314 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5315 int sectors_per_part, num_sectors, k;
5316 int heads_by_sects, start_sec, end_sec;
5318 /* assume partition table already zeroed */
5319 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5321 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5322 sdebug_num_parts = SDEBUG_MAX_PARTS;
5323 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5325 num_sectors = (int)get_sdebug_capacity();
5326 sectors_per_part = (num_sectors - sdebug_sectors_per)
5328 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5329 starts[0] = sdebug_sectors_per;
5330 max_part_secs = sectors_per_part;
5331 for (k = 1; k < sdebug_num_parts; ++k) {
5332 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5334 if (starts[k] - starts[k - 1] < max_part_secs)
5335 max_part_secs = starts[k] - starts[k - 1];
5337 starts[sdebug_num_parts] = num_sectors;
5338 starts[sdebug_num_parts + 1] = 0;
5340 ramp[510] = 0x55; /* magic partition markings */
5342 pp = (struct msdos_partition *)(ramp + 0x1be);
5343 for (k = 0; starts[k + 1]; ++k, ++pp) {
5344 start_sec = starts[k];
5345 end_sec = starts[k] + max_part_secs - 1;
5348 pp->cyl = start_sec / heads_by_sects;
5349 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5350 / sdebug_sectors_per;
5351 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5353 pp->end_cyl = end_sec / heads_by_sects;
5354 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5355 / sdebug_sectors_per;
5356 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5358 pp->start_sect = cpu_to_le32(start_sec);
5359 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5360 pp->sys_ind = 0x83; /* plain Linux partition */
5364 static void block_unblock_all_queues(bool block)
5367 struct sdebug_queue *sqp;
5369 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5370 atomic_set(&sqp->blocked, (int)block);
5373 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5374 * commands will be processed normally before triggers occur.
5376 static void tweak_cmnd_count(void)
5380 modulo = abs(sdebug_every_nth);
5383 block_unblock_all_queues(true);
5384 count = atomic_read(&sdebug_cmnd_count);
5385 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5386 block_unblock_all_queues(false);
5389 static void clear_queue_stats(void)
5391 atomic_set(&sdebug_cmnd_count, 0);
5392 atomic_set(&sdebug_completions, 0);
5393 atomic_set(&sdebug_miss_cpus, 0);
5394 atomic_set(&sdebug_a_tsf, 0);
5397 static bool inject_on_this_cmd(void)
5399 if (sdebug_every_nth == 0)
5401 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5404 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5406 /* Complete the processing of the thread that queued a SCSI command to this
5407 * driver. It either completes the command by calling cmnd_done() or
5408 * schedules a hr timer or work queue then returns 0. Returns
5409 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
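 * The delay arguments select the completion path: delta_jiff == 0 responds in
 * the caller's thread, a positive delta_jiff (or ndelay > 0) arms a high
 * resolution timer, and a negative delta_jiff defers completion to a work
 * queue; REQ_HIPRI requests are instead left for the blk-mq poll path.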
5411 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5413 int (*pfp)(struct scsi_cmnd *,
5414 struct sdebug_dev_info *),
5415 int delta_jiff, int ndelay)
5418 bool inject = false;
5419 bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
5420 int k, num_in_q, qdepth;
5421 unsigned long iflags;
5422 u64 ns_from_boot = 0;
5423 struct sdebug_queue *sqp;
5424 struct sdebug_queued_cmd *sqcp;
5425 struct scsi_device *sdp;
5426 struct sdebug_defer *sd_dp;
5428 if (unlikely(devip == NULL)) {
5429 if (scsi_result == 0)
5430 scsi_result = DID_NO_CONNECT << 16;
5431 goto respond_in_thread;
5435 if (delta_jiff == 0)
5436 goto respond_in_thread;
5438 sqp = get_queue(cmnd);
5439 spin_lock_irqsave(&sqp->qc_lock, iflags);
5440 if (unlikely(atomic_read(&sqp->blocked))) {
5441 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5442 return SCSI_MLQUEUE_HOST_BUSY;
5444 num_in_q = atomic_read(&devip->num_in_q);
5445 qdepth = cmnd->device->queue_depth;
5446 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5448 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5449 goto respond_in_thread;
5451 scsi_result = device_qfull_result;
5452 } else if (unlikely(sdebug_every_nth &&
5453 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5454 (scsi_result == 0))) {
5455 if ((num_in_q == (qdepth - 1)) &&
5456 (atomic_inc_return(&sdebug_a_tsf) >=
5457 abs(sdebug_every_nth))) {
5458 atomic_set(&sdebug_a_tsf, 0);
5460 scsi_result = device_qfull_result;
5464 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5465 if (unlikely(k >= sdebug_max_queue)) {
5466 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5468 goto respond_in_thread;
5469 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5470 scsi_result = device_qfull_result;
5471 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5472 sdev_printk(KERN_INFO, sdp,
5473 "%s: max_queue=%d exceeded, %s\n",
5474 __func__, sdebug_max_queue,
5475 (scsi_result ? "status: TASK SET FULL" :
5476 "report: host busy"));
5478 goto respond_in_thread;
5480 return SCSI_MLQUEUE_HOST_BUSY;
5482 set_bit(k, sqp->in_use_bm);
5483 atomic_inc(&devip->num_in_q);
5484 sqcp = &sqp->qc_arr[k];
5485 sqcp->a_cmnd = cmnd;
5486 cmnd->host_scribble = (unsigned char *)sqcp;
5487 sd_dp = sqcp->sd_dp;
5488 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5491 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5493 atomic_dec(&devip->num_in_q);
5494 clear_bit(k, sqp->in_use_bm);
5495 return SCSI_MLQUEUE_HOST_BUSY;
5502 /* Set the hostwide tag */
5503 if (sdebug_host_max_queue)
5504 sd_dp->hc_idx = get_tag(cmnd);
5507 ns_from_boot = ktime_get_boottime_ns();
5509 /* one of the resp_*() response functions is called here */
5510 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5511 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5512 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5513 delta_jiff = ndelay = 0;
5515 if (cmnd->result == 0 && scsi_result != 0)
5516 cmnd->result = scsi_result;
5517 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5518 if (atomic_read(&sdeb_inject_pending)) {
5519 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5520 atomic_set(&sdeb_inject_pending, 0);
5521 cmnd->result = check_condition_result;
5525 if (unlikely(sdebug_verbose && cmnd->result))
5526 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5527 __func__, cmnd->result);
5529 if (delta_jiff > 0 || ndelay > 0) {
5532 if (delta_jiff > 0) {
5533 u64 ns = jiffies_to_nsecs(delta_jiff);
5535 if (sdebug_random && ns < U32_MAX) {
5536 ns = prandom_u32_max((u32)ns);
5537 } else if (sdebug_random) {
5538 ns >>= 12; /* scale to 4 usec precision */
5539 if (ns < U32_MAX) /* over 4 hours max */
5540 ns = prandom_u32_max((u32)ns);
5543 kt = ns_to_ktime(ns);
5544 } else { /* ndelay has a 4.2 second max */
5545 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5547 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5548 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5550 if (kt <= d) { /* elapsed duration >= kt */
5551 spin_lock_irqsave(&sqp->qc_lock, iflags);
5552 sqcp->a_cmnd = NULL;
5553 atomic_dec(&devip->num_in_q);
5554 clear_bit(k, sqp->in_use_bm);
5555 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5558 /* call scsi_done() from this thread */
5559 cmnd->scsi_done(cmnd);
5562 /* otherwise reduce kt by elapsed time */
5567 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5568 spin_lock_irqsave(&sqp->qc_lock, iflags);
5569 if (!sd_dp->init_poll) {
5570 sd_dp->init_poll = true;
5571 sqcp->sd_dp = sd_dp;
5572 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5575 sd_dp->defer_t = SDEB_DEFER_POLL;
5576 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5578 if (!sd_dp->init_hrt) {
5579 sd_dp->init_hrt = true;
5580 sqcp->sd_dp = sd_dp;
5581 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5582 HRTIMER_MODE_REL_PINNED);
5583 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5584 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5587 sd_dp->defer_t = SDEB_DEFER_HRT;
5588 /* schedule the invocation of scsi_done() for a later time */
5589 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5591 if (sdebug_statistics)
5592 sd_dp->issuing_cpu = raw_smp_processor_id();
5593 } else { /* jdelay < 0, use work queue */
5594 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5595 atomic_read(&sdeb_inject_pending)))
5596 sd_dp->aborted = true;
5598 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5599 spin_lock_irqsave(&sqp->qc_lock, iflags);
5600 if (!sd_dp->init_poll) {
5601 sd_dp->init_poll = true;
5602 sqcp->sd_dp = sd_dp;
5603 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5606 sd_dp->defer_t = SDEB_DEFER_POLL;
5607 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5609 if (!sd_dp->init_wq) {
5610 sd_dp->init_wq = true;
5611 sqcp->sd_dp = sd_dp;
5612 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5614 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5616 sd_dp->defer_t = SDEB_DEFER_WQ;
5617 schedule_work(&sd_dp->ew.work);
5619 if (sdebug_statistics)
5620 sd_dp->issuing_cpu = raw_smp_processor_id();
5621 if (unlikely(sd_dp->aborted)) {
5622 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5623 scsi_cmd_to_rq(cmnd)->tag);
5624 blk_abort_request(scsi_cmd_to_rq(cmnd));
5625 atomic_set(&sdeb_inject_pending, 0);
5626 sd_dp->aborted = false;
5629 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5630 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5631 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5634 respond_in_thread: /* call back to mid-layer using invocation thread */
5635 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5636 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5637 if (cmnd->result == 0 && scsi_result != 0)
5638 cmnd->result = scsi_result;
5639 cmnd->scsi_done(cmnd);
5643 /* Note: The following macros create attribute files in the
5644 /sys/module/scsi_debug/parameters directory. Unfortunately this
5645 driver is unaware of a change and cannot trigger auxiliary actions
5646 as it can when the corresponding attribute in the
5647 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
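   For example 'echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts' lets the
   driver act on the new value (see opts_store() further down), whereas writing
   the same value to /sys/module/scsi_debug/parameters/opts only updates the
   variable.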
5649 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5650 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5651 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5652 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5653 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5654 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5655 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5656 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5657 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5658 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5659 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5660 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5661 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5662 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5663 module_param_string(inq_product, sdebug_inq_product_id,
5664 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5665 module_param_string(inq_rev, sdebug_inq_product_rev,
5666 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5667 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5668 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5669 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5670 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5671 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5672 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5673 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5674 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5675 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5676 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5677 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5679 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5681 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5682 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5683 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5684 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5685 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5686 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5687 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5688 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5689 module_param_named(per_host_store, sdebug_per_host_store, bool,
5691 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5692 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5693 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5694 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5695 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5696 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5697 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5698 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5699 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5700 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5701 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5702 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5703 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5704 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5705 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5706 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5707 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5708 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5710 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5711 module_param_named(write_same_length, sdebug_write_same_length, int,
5713 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5714 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5715 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5716 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5718 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5719 MODULE_DESCRIPTION("SCSI debug adapter driver");
5720 MODULE_LICENSE("GPL");
5721 MODULE_VERSION(SDEBUG_VERSION);
5723 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5724 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5725 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5726 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5727 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5728 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5729 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5730 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5731 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5732 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5733 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5734 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5735 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5736 MODULE_PARM_DESC(host_max_queue,
5737 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5738 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5739 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5740 SDEBUG_VERSION "\")");
5741 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5742 MODULE_PARM_DESC(lbprz,
5743 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5744 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5745 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5746 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5747 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5748 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5749 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5750 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5751 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5752 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5753 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5754 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5755 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5756 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5757 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5758 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5759 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5760 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5761 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5762 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5763 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5764 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5765 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5766 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5767 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5768 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5769 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5770 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5771 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5772 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5773 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5774 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5775 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5776 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5777 MODULE_PARM_DESC(uuid_ctl,
5778 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5779 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5780 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5781 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5782 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5783 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5784 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5785 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5786 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5788 #define SDEBUG_INFO_LEN 256
5789 static char sdebug_info[SDEBUG_INFO_LEN];
5791 static const char *scsi_debug_info(struct Scsi_Host *shp)
5795 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5796 my_name, SDEBUG_VERSION, sdebug_version_date);
5797 if (k >= (SDEBUG_INFO_LEN - 1))
5799 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5800 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5801 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5802 "statistics", (int)sdebug_statistics);
5806 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5807 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5812 int minLen = length > 15 ? 15 : length;
5814 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5816 memcpy(arr, buffer, minLen);
5818 if (1 != sscanf(arr, "%d", &opts))
5821 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5822 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5823 if (sdebug_every_nth != 0)
5828 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5829 * same for each scsi_debug host (if more than one). Some of the counters
5830 * that are output are not atomic, so they might be inaccurate on a busy system. */
5831 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5834 struct sdebug_queue *sqp;
5835 struct sdebug_host_info *sdhp;
5837 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5838 SDEBUG_VERSION, sdebug_version_date);
5839 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5840 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5841 sdebug_opts, sdebug_every_nth);
5842 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5843 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5844 sdebug_sector_size, "bytes");
5845 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5846 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5848 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5849 num_dev_resets, num_target_resets, num_bus_resets,
5851 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5852 dix_reads, dix_writes, dif_errors);
5853 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5855 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5856 atomic_read(&sdebug_cmnd_count),
5857 atomic_read(&sdebug_completions),
5858 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5859 atomic_read(&sdebug_a_tsf),
5860 atomic_read(&sdeb_mq_poll_count));
5862 seq_printf(m, "submit_queues=%d\n", submit_queues);
5863 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5864 seq_printf(m, " queue %d:\n", j);
5865 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5866 if (f != sdebug_max_queue) {
5867 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5868 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5869 "first,last bits", f, l);
5873 seq_printf(m, "this host_no=%d\n", host->host_no);
5874 if (!xa_empty(per_store_ap)) {
5877 unsigned long l_idx;
5878 struct sdeb_store_info *sip;
5880 seq_puts(m, "\nhost list:\n");
5882 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5884 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5885 sdhp->shost->host_no, idx);
5888 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5889 sdeb_most_recent_idx);
5891 xa_for_each(per_store_ap, l_idx, sip) {
5892 niu = xa_get_mark(per_store_ap, l_idx,
5893 SDEB_XA_NOT_IN_USE);
5895 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5896 (niu ? " not_in_use" : ""));
5903 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5905 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5907 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5908 * of delay is jiffies.
5910 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5915 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5917 if (sdebug_jdelay != jdelay) {
5919 struct sdebug_queue *sqp;
5921 block_unblock_all_queues(true);
5922 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5924 k = find_first_bit(sqp->in_use_bm,
5926 if (k != sdebug_max_queue) {
5927 res = -EBUSY; /* queued commands */
5932 sdebug_jdelay = jdelay;
5935 block_unblock_all_queues(false);
5941 static DRIVER_ATTR_RW(delay);
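/* An illustrative run-time example (value hypothetical):
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * sets the per-command response delay to 2 jiffies; the write fails with
 * EBUSY if commands are still queued when the change is attempted. */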
5943 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5945 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5947 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5948 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5949 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5954 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5955 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5957 if (sdebug_ndelay != ndelay) {
5959 struct sdebug_queue *sqp;
5961 block_unblock_all_queues(true);
5962 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5964 k = find_first_bit(sqp->in_use_bm,
5966 if (k != sdebug_max_queue) {
5967 res = -EBUSY; /* queued commands */
5972 sdebug_ndelay = ndelay;
5973 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5976 block_unblock_all_queues(false);
5982 static DRIVER_ATTR_RW(ndelay);
5984 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5986 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5989 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5995 if (sscanf(buf, "%10s", work) == 1) {
5996 if (strncasecmp(work, "0x", 2) == 0) {
5997 if (kstrtoint(work + 2, 16, &opts) == 0)
6000 if (kstrtoint(work, 10, &opts) == 0)
6007 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6008 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6012 static DRIVER_ATTR_RW(opts);
6014 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6016 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6018 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6023 /* Cannot change from or to TYPE_ZBC with sysfs */
6024 if (sdebug_ptype == TYPE_ZBC)
6027 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6035 static DRIVER_ATTR_RW(ptype);
6037 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6039 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6041 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6046 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6052 static DRIVER_ATTR_RW(dsense);
6054 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6056 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6058 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6063 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6064 bool want_store = (n == 0);
6065 struct sdebug_host_info *sdhp;
6068 sdebug_fake_rw = (sdebug_fake_rw > 0);
6069 if (sdebug_fake_rw == n)
6070 return count; /* not transitioning so do nothing */
6072 if (want_store) { /* 1 --> 0 transition, set up store */
6073 if (sdeb_first_idx < 0) {
6074 idx = sdebug_add_store();
6078 idx = sdeb_first_idx;
6079 xa_clear_mark(per_store_ap, idx,
6080 SDEB_XA_NOT_IN_USE);
6082 /* make all hosts use same store */
6083 list_for_each_entry(sdhp, &sdebug_host_list,
6085 if (sdhp->si_idx != idx) {
6086 xa_set_mark(per_store_ap, sdhp->si_idx,
6087 SDEB_XA_NOT_IN_USE);
6091 sdeb_most_recent_idx = idx;
6092 } else { /* 0 --> 1 transition is trigger for shrink */
6093 sdebug_erase_all_stores(true /* apart from first */);
6100 static DRIVER_ATTR_RW(fake_rw);
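/* An illustrative example of the transitions handled above:
 *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw
 * is the 1 --> 0 transition that sets up (or re-uses) a shared store for all
 * hosts, while writing 1 back is the trigger for erasing all stores apart
 * from the first; writes that do not change the value do nothing. */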
6102 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6104 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6106 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6111 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6112 sdebug_no_lun_0 = n;
6117 static DRIVER_ATTR_RW(no_lun_0);
6119 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6121 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6123 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6128 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6129 sdebug_num_tgts = n;
6130 sdebug_max_tgts_luns();
6135 static DRIVER_ATTR_RW(num_tgts);
6137 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6139 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6141 static DRIVER_ATTR_RO(dev_size_mb);
6143 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6145 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6148 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6153 if (kstrtobool(buf, &v))
6156 sdebug_per_host_store = v;
6159 static DRIVER_ATTR_RW(per_host_store);
6161 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6163 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6165 static DRIVER_ATTR_RO(num_parts);
6167 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6169 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6171 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6177 if (sscanf(buf, "%10s", work) == 1) {
6178 if (strncasecmp(work, "0x", 2) == 0) {
6179 if (kstrtoint(work + 2, 16, &nth) == 0)
6180 goto every_nth_done;
6182 if (kstrtoint(work, 10, &nth) == 0)
6183 goto every_nth_done;
6189 sdebug_every_nth = nth;
6190 if (nth && !sdebug_statistics) {
6191 pr_info("every_nth needs statistics=1, set it\n");
6192 sdebug_statistics = true;
6197 static DRIVER_ATTR_RW(every_nth);
6199 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6201 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6203 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6209 if (kstrtoint(buf, 0, &n))
6212 if (n > (int)SAM_LUN_AM_FLAT) {
6213 pr_warn("only LUN address methods 0 and 1 are supported\n");
6216 changed = ((int)sdebug_lun_am != n);
6218 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6219 struct sdebug_host_info *sdhp;
6220 struct sdebug_dev_info *dp;
6222 spin_lock(&sdebug_host_list_lock);
6223 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6224 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6225 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6228 spin_unlock(&sdebug_host_list_lock);
6234 static DRIVER_ATTR_RW(lun_format);
6236 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6238 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6240 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6246 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6248 pr_warn("max_luns can be no more than 256\n");
6251 changed = (sdebug_max_luns != n);
6252 sdebug_max_luns = n;
6253 sdebug_max_tgts_luns();
6254 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6255 struct sdebug_host_info *sdhp;
6256 struct sdebug_dev_info *dp;
6258 spin_lock(&sdebug_host_list_lock);
6259 list_for_each_entry(sdhp, &sdebug_host_list,
6261 list_for_each_entry(dp, &sdhp->dev_info_list,
6263 set_bit(SDEBUG_UA_LUNS_CHANGED,
6267 spin_unlock(&sdebug_host_list_lock);
6273 static DRIVER_ATTR_RW(max_luns);
6275 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6277 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6279 /* N.B. max_queue can be changed while there are queued commands. In-flight
6280 * commands beyond the new max_queue will still be completed. */
6281 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6285 struct sdebug_queue *sqp;
6287 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6288 (n <= SDEBUG_CANQUEUE) &&
6289 (sdebug_host_max_queue == 0)) {
6290 block_unblock_all_queues(true);
6292 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6294 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6298 sdebug_max_queue = n;
6299 if (k == SDEBUG_CANQUEUE)
6300 atomic_set(&retired_max_queue, 0);
6302 atomic_set(&retired_max_queue, k + 1);
6304 atomic_set(&retired_max_queue, 0);
6305 block_unblock_all_queues(false);
6310 static DRIVER_ATTR_RW(max_queue);
6312 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6314 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6318 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6319 * in range [0, sdebug_host_max_queue), we can't change it.
6321 static DRIVER_ATTR_RO(host_max_queue);
6323 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6325 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6327 static DRIVER_ATTR_RO(no_uld);
6329 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6331 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6333 static DRIVER_ATTR_RO(scsi_level);
6335 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6337 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6339 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6345 /* Ignore capacity change for ZBC drives for now */
6346 if (sdeb_zbc_in_use)
6349 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6350 changed = (sdebug_virtual_gb != n);
6351 sdebug_virtual_gb = n;
6352 sdebug_capacity = get_sdebug_capacity();
6354 struct sdebug_host_info *sdhp;
6355 struct sdebug_dev_info *dp;
6357 spin_lock(&sdebug_host_list_lock);
6358 list_for_each_entry(sdhp, &sdebug_host_list,
6360 list_for_each_entry(dp, &sdhp->dev_info_list,
6362 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6366 spin_unlock(&sdebug_host_list_lock);
6372 static DRIVER_ATTR_RW(virtual_gb);
6374 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6376 /* the absolute number of currently active hosts is shown */
6377 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6380 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6385 struct sdeb_store_info *sip;
6386 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6389 if (sscanf(buf, "%d", &delta_hosts) != 1)
6391 if (delta_hosts > 0) {
6395 xa_for_each_marked(per_store_ap, idx, sip,
6396 SDEB_XA_NOT_IN_USE) {
6397 sdeb_most_recent_idx = (int)idx;
6401 if (found) /* re-use case */
6402 sdebug_add_host_helper((int)idx);
6404 sdebug_do_add_host(true);
6406 sdebug_do_add_host(false);
6408 } while (--delta_hosts);
6409 } else if (delta_hosts < 0) {
6411 sdebug_do_remove_host(false);
6412 } while (++delta_hosts);
6416 static DRIVER_ATTR_RW(add_host);
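/* Illustrative examples (counts hypothetical):
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * adds two hosts (possibly re-using a store marked not-in-use), while
 *     echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * removes one host. */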
6418 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6420 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6422 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6427 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6428 sdebug_vpd_use_hostno = n;
6433 static DRIVER_ATTR_RW(vpd_use_hostno);
6435 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6437 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6439 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6444 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6446 sdebug_statistics = true;
6448 clear_queue_stats();
6449 sdebug_statistics = false;
6455 static DRIVER_ATTR_RW(statistics);
6457 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6459 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6461 static DRIVER_ATTR_RO(sector_size);
6463 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6465 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6467 static DRIVER_ATTR_RO(submit_queues);
6469 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6471 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6473 static DRIVER_ATTR_RO(dix);
6475 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6477 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6479 static DRIVER_ATTR_RO(dif);
6481 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6483 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6485 static DRIVER_ATTR_RO(guard);
6487 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6489 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6491 static DRIVER_ATTR_RO(ato);
6493 static ssize_t map_show(struct device_driver *ddp, char *buf)
6497 if (!scsi_debug_lbp())
6498 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6499 sdebug_store_sectors);
6501 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6502 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6505 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6506 (int)map_size, sip->map_storep);
6508 buf[count++] = '\n';
6513 static DRIVER_ATTR_RO(map);
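/* Illustrative note: "%*pbl" prints the provisioning bitmap in list form, so
 * a read of the map attribute typically looks like ranges of mapped
 * provisioning blocks, e.g. "0-1,48-63" (values hypothetical); without
 * logical block provisioning a single "0-<store_sectors>" range is shown. */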
6515 static ssize_t random_show(struct device_driver *ddp, char *buf)
6517 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6520 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6525 if (kstrtobool(buf, &v))
6531 static DRIVER_ATTR_RW(random);
6533 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6535 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6537 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6542 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6543 sdebug_removable = (n > 0);
6548 static DRIVER_ATTR_RW(removable);
6550 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6552 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6554 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6555 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6560 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6561 sdebug_host_lock = (n > 0);
6566 static DRIVER_ATTR_RW(host_lock);
6568 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6570 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6572 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6577 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6578 sdebug_strict = (n > 0);
6583 static DRIVER_ATTR_RW(strict);
6585 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6587 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6589 static DRIVER_ATTR_RO(uuid_ctl);
6591 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6593 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6595 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6600 ret = kstrtoint(buf, 0, &n);
6604 all_config_cdb_len();
6607 static DRIVER_ATTR_RW(cdb_len);
6609 static const char * const zbc_model_strs_a[] = {
6610 [BLK_ZONED_NONE] = "none",
6611 [BLK_ZONED_HA] = "host-aware",
6612 [BLK_ZONED_HM] = "host-managed",
6615 static const char * const zbc_model_strs_b[] = {
6616 [BLK_ZONED_NONE] = "no",
6617 [BLK_ZONED_HA] = "aware",
6618 [BLK_ZONED_HM] = "managed",
6621 static const char * const zbc_model_strs_c[] = {
6622 [BLK_ZONED_NONE] = "0",
6623 [BLK_ZONED_HA] = "1",
6624 [BLK_ZONED_HM] = "2",
6627 static int sdeb_zbc_model_str(const char *cp)
6629 int res = sysfs_match_string(zbc_model_strs_a, cp);
6632 res = sysfs_match_string(zbc_model_strs_b, cp);
6634 res = sysfs_match_string(zbc_model_strs_c, cp);
6642 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6644 return scnprintf(buf, PAGE_SIZE, "%s\n",
6645 zbc_model_strs_a[sdeb_zbc_model]);
6647 static DRIVER_ATTR_RO(zbc);
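/* Illustrative note: sdeb_zbc_model_str() tries the three string tables above
 * in turn, so e.g. "host-managed", "managed" and "2" all select BLK_ZONED_HM,
 * and "none", "no" and "0" all select BLK_ZONED_NONE. */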
6649 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6651 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6653 static DRIVER_ATTR_RO(tur_ms_to_ready);
6655 /* Note: The following array creates attribute files in the
6656 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6657 files (over those found in the /sys/module/scsi_debug/parameters
6658 directory) is that auxiliary actions can be triggered when an attribute
6659 is changed. For example, see add_host_store() above. */
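/* An illustrative comparison, using max_luns as the example attribute:
 *     echo 4 > /sys/bus/pseudo/drivers/scsi_debug/max_luns
 * goes through max_luns_store() above, so it also calls
 * sdebug_max_tgts_luns() and (for scsi_level >= 5) raises LUNS_CHANGED unit
 * attentions, whereas writing the same value to
 * /sys/module/scsi_debug/parameters/max_luns (if that node is writable) only
 * changes the variable with no side effects. */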
6662 static struct attribute *sdebug_drv_attrs[] = {
6663 &driver_attr_delay.attr,
6664 &driver_attr_opts.attr,
6665 &driver_attr_ptype.attr,
6666 &driver_attr_dsense.attr,
6667 &driver_attr_fake_rw.attr,
6668 &driver_attr_host_max_queue.attr,
6669 &driver_attr_no_lun_0.attr,
6670 &driver_attr_num_tgts.attr,
6671 &driver_attr_dev_size_mb.attr,
6672 &driver_attr_num_parts.attr,
6673 &driver_attr_every_nth.attr,
6674 &driver_attr_lun_format.attr,
6675 &driver_attr_max_luns.attr,
6676 &driver_attr_max_queue.attr,
6677 &driver_attr_no_uld.attr,
6678 &driver_attr_scsi_level.attr,
6679 &driver_attr_virtual_gb.attr,
6680 &driver_attr_add_host.attr,
6681 &driver_attr_per_host_store.attr,
6682 &driver_attr_vpd_use_hostno.attr,
6683 &driver_attr_sector_size.attr,
6684 &driver_attr_statistics.attr,
6685 &driver_attr_submit_queues.attr,
6686 &driver_attr_dix.attr,
6687 &driver_attr_dif.attr,
6688 &driver_attr_guard.attr,
6689 &driver_attr_ato.attr,
6690 &driver_attr_map.attr,
6691 &driver_attr_random.attr,
6692 &driver_attr_removable.attr,
6693 &driver_attr_host_lock.attr,
6694 &driver_attr_ndelay.attr,
6695 &driver_attr_strict.attr,
6696 &driver_attr_uuid_ctl.attr,
6697 &driver_attr_cdb_len.attr,
6698 &driver_attr_tur_ms_to_ready.attr,
6699 &driver_attr_zbc.attr,
6702 ATTRIBUTE_GROUPS(sdebug_drv);
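/* Illustrative note: ATTRIBUTE_GROUPS(sdebug_drv) generates the
 * sdebug_drv_groups array that pseudo_lld_bus references via .drv_groups
 * below, which is how the attribute files above appear under
 * /sys/bus/pseudo/drivers/scsi_debug. */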
6704 static struct device *pseudo_primary;
6706 static int __init scsi_debug_init(void)
6708 bool want_store = (sdebug_fake_rw == 0);
6710 int k, ret, hosts_to_add;
6713 ramdisk_lck_a[0] = &atomic_rw;
6714 ramdisk_lck_a[1] = &atomic_rw2;
6715 atomic_set(&retired_max_queue, 0);
6717 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6718 pr_warn("ndelay must be less than 1 second, ignored\n");
6720 } else if (sdebug_ndelay > 0)
6721 sdebug_jdelay = JDELAY_OVERRIDDEN;
6723 switch (sdebug_sector_size) {
6730 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6734 switch (sdebug_dif) {
6735 case T10_PI_TYPE0_PROTECTION:
6737 case T10_PI_TYPE1_PROTECTION:
6738 case T10_PI_TYPE2_PROTECTION:
6739 case T10_PI_TYPE3_PROTECTION:
6740 have_dif_prot = true;
6744 pr_err("dif must be 0, 1, 2 or 3\n");
6748 if (sdebug_num_tgts < 0) {
6749 pr_err("num_tgts must be >= 0\n");
6753 if (sdebug_guard > 1) {
6754 pr_err("guard must be 0 or 1\n");
6758 if (sdebug_ato > 1) {
6759 pr_err("ato must be 0 or 1\n");
6763 if (sdebug_physblk_exp > 15) {
6764 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6768 sdebug_lun_am = sdebug_lun_am_i;
6769 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6770 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6771 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6774 if (sdebug_max_luns > 256) {
6775 if (sdebug_max_luns > 16384) {
6776 pr_warn("max_luns can be no more than 16384, use default\n");
6777 sdebug_max_luns = DEF_MAX_LUNS;
6779 sdebug_lun_am = SAM_LUN_AM_FLAT;
6782 if (sdebug_lowest_aligned > 0x3fff) {
6783 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6787 if (submit_queues < 1) {
6788 pr_err("submit_queues must be 1 or more\n");
6792 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6793 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6797 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6798 (sdebug_host_max_queue < 0)) {
6799 pr_err("host_max_queue must be in range [0 %d]\n",
6804 if (sdebug_host_max_queue &&
6805 (sdebug_max_queue != sdebug_host_max_queue)) {
6806 sdebug_max_queue = sdebug_host_max_queue;
6807 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6811 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6813 if (sdebug_q_arr == NULL)
6815 for (k = 0; k < submit_queues; ++k)
6816 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6819 * check for host managed zoned block device specified with
6820 * ptype=0x14 or zbc=XXX.
6822 if (sdebug_ptype == TYPE_ZBC) {
6823 sdeb_zbc_model = BLK_ZONED_HM;
6824 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6825 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6831 switch (sdeb_zbc_model) {
6832 case BLK_ZONED_NONE:
6834 sdebug_ptype = TYPE_DISK;
6837 sdebug_ptype = TYPE_ZBC;
6840 pr_err("Invalid ZBC model\n");
6845 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6846 sdeb_zbc_in_use = true;
6847 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6848 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6851 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6852 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6853 if (sdebug_dev_size_mb < 1)
6854 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6855 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6856 sdebug_store_sectors = sz / sdebug_sector_size;
6857 sdebug_capacity = get_sdebug_capacity();
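/* e.g. (values hypothetical) dev_size_mb=8 and sector_size=512 give
 * sz = 8 * 1048576 = 8388608 bytes and sdebug_store_sectors = 16384 */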
6859 /* play around with geometry, don't waste too much on track 0 */
6861 sdebug_sectors_per = 32;
6862 if (sdebug_dev_size_mb >= 256)
6864 else if (sdebug_dev_size_mb >= 16)
6866 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6867 (sdebug_sectors_per * sdebug_heads);
6868 if (sdebug_cylinders_per >= 1024) {
6869 /* other LLDs do this; implies >= 1GB ram disk ... */
6871 sdebug_sectors_per = 63;
6872 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6873 (sdebug_sectors_per * sdebug_heads);
6875 if (scsi_debug_lbp()) {
6876 sdebug_unmap_max_blocks =
6877 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6879 sdebug_unmap_max_desc =
6880 clamp(sdebug_unmap_max_desc, 0U, 256U);
6882 sdebug_unmap_granularity =
6883 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6885 if (sdebug_unmap_alignment &&
6886 sdebug_unmap_granularity <=
6887 sdebug_unmap_alignment) {
6888 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6893 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6895 idx = sdebug_add_store();
6902 pseudo_primary = root_device_register("pseudo_0");
6903 if (IS_ERR(pseudo_primary)) {
6904 pr_warn("root_device_register() error\n");
6905 ret = PTR_ERR(pseudo_primary);
6908 ret = bus_register(&pseudo_lld_bus);
6910 pr_warn("bus_register error: %d\n", ret);
6913 ret = driver_register(&sdebug_driverfs_driver);
6915 pr_warn("driver_register error: %d\n", ret);
6919 hosts_to_add = sdebug_add_host;
6920 sdebug_add_host = 0;
6922 for (k = 0; k < hosts_to_add; k++) {
6923 if (want_store && k == 0) {
6924 ret = sdebug_add_host_helper(idx);
6926 pr_err("add_host_helper k=%d, error=%d\n",
6931 ret = sdebug_do_add_host(want_store &&
6932 sdebug_per_host_store);
6934 pr_err("add_host k=%d error=%d\n", k, -ret);
6940 pr_info("built %d host(s)\n", sdebug_num_hosts);
6945 bus_unregister(&pseudo_lld_bus);
6947 root_device_unregister(pseudo_primary);
6949 sdebug_erase_store(idx, NULL);
6951 kfree(sdebug_q_arr);
6955 static void __exit scsi_debug_exit(void)
6957 int k = sdebug_num_hosts;
6961 sdebug_do_remove_host(true);
6963 driver_unregister(&sdebug_driverfs_driver);
6964 bus_unregister(&pseudo_lld_bus);
6965 root_device_unregister(pseudo_primary);
6967 sdebug_erase_all_stores(false);
6968 xa_destroy(per_store_ap);
6969 kfree(sdebug_q_arr);
6972 device_initcall(scsi_debug_init);
6973 module_exit(scsi_debug_exit);
6975 static void sdebug_release_adapter(struct device *dev)
6977 struct sdebug_host_info *sdbg_host;
6979 sdbg_host = to_sdebug_host(dev);
6983 /* idx must be valid; if sip is NULL then it will be obtained using idx */
6984 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6989 if (xa_empty(per_store_ap))
6991 sip = xa_load(per_store_ap, idx);
6995 vfree(sip->map_storep);
6996 vfree(sip->dif_storep);
6998 xa_erase(per_store_ap, idx);
7002 /* Assume apart_from_first==false only in shutdown case. */
7003 static void sdebug_erase_all_stores(bool apart_from_first)
7006 struct sdeb_store_info *sip = NULL;
7008 xa_for_each(per_store_ap, idx, sip) {
7009 if (apart_from_first)
7010 apart_from_first = false;
7012 sdebug_erase_store(idx, sip);
7014 if (apart_from_first)
7015 sdeb_most_recent_idx = sdeb_first_idx;
7019 * Returns the new store's xarray element index (idx) if >= 0, else a negated errno.
7020 * The number of stores is limited to 65536.
7022 static int sdebug_add_store(void)
7026 unsigned long iflags;
7027 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7028 struct sdeb_store_info *sip = NULL;
7029 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7031 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7035 xa_lock_irqsave(per_store_ap, iflags);
7036 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7037 if (unlikely(res < 0)) {
7038 xa_unlock_irqrestore(per_store_ap, iflags);
7040 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7043 sdeb_most_recent_idx = n_idx;
7044 if (sdeb_first_idx < 0)
7045 sdeb_first_idx = n_idx;
7046 xa_unlock_irqrestore(per_store_ap, iflags);
7049 sip->storep = vzalloc(sz);
7051 pr_err("user data oom\n");
7054 if (sdebug_num_parts > 0)
7055 sdebug_build_parts(sip->storep, sz);
7057 /* DIF/DIX: what T10 calls Protection Information (PI) */
7061 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7062 sip->dif_storep = vmalloc(dif_size);
7064 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7067 if (!sip->dif_storep) {
7068 pr_err("DIX oom\n");
7071 memset(sip->dif_storep, 0xff, dif_size);
7073 /* Logical Block Provisioning */
7074 if (scsi_debug_lbp()) {
7075 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7076 sip->map_storep = vmalloc(array_size(sizeof(long),
7077 BITS_TO_LONGS(map_size)));
7079 pr_info("%lu provisioning blocks\n", map_size);
7081 if (!sip->map_storep) {
7082 pr_err("LBP map oom\n");
7086 bitmap_zero(sip->map_storep, map_size);
7088 /* Map first 1KB for partition table */
7089 if (sdebug_num_parts)
7090 map_region(sip, 0, 2);
7093 rwlock_init(&sip->macc_lck);
7096 sdebug_erase_store((int)n_idx, sip);
7097 pr_warn("%s: failed, errno=%d\n", __func__, -res);
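/*
 * A minimal standalone sketch (not compiled; the names demo_stores,
 * DEMO_NOT_IN_USE and demo_* are hypothetical) of the per-store xarray
 * pattern used by this driver: allocate an index under a limit, then use an
 * xarray mark to flag entries that still exist but are currently not in use.
 */
#if 0
static DEFINE_XARRAY_FLAGS(demo_stores, XA_FLAGS_ALLOC);
#define DEMO_NOT_IN_USE XA_MARK_1

static int demo_add_entry(void *payload)
{
	u32 idx;
	struct xa_limit lim = { .max = 1 << 16, .min = 0 };
	int res = xa_alloc(&demo_stores, &idx, payload, lim, GFP_KERNEL);

	if (res < 0)
		return res;	/* e.g. -EBUSY when no free index under the limit */
	return (int)idx;	/* index of the new element */
}

static void demo_retire_entry(int idx)
{
	/* keep the entry allocated but flag it as re-usable */
	xa_set_mark(&demo_stores, idx, DEMO_NOT_IN_USE);
}

static bool demo_entry_unused(int idx)
{
	return xa_get_mark(&demo_stores, idx, DEMO_NOT_IN_USE);
}
#endif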
7101 static int sdebug_add_host_helper(int per_host_idx)
7103 int k, devs_per_host, idx;
7104 int error = -ENOMEM;
7105 struct sdebug_host_info *sdbg_host;
7106 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7108 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7111 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7112 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7113 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7114 sdbg_host->si_idx = idx;
7116 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7118 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7119 for (k = 0; k < devs_per_host; k++) {
7120 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7125 spin_lock(&sdebug_host_list_lock);
7126 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7127 spin_unlock(&sdebug_host_list_lock);
7129 sdbg_host->dev.bus = &pseudo_lld_bus;
7130 sdbg_host->dev.parent = pseudo_primary;
7131 sdbg_host->dev.release = &sdebug_release_adapter;
7132 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7134 error = device_register(&sdbg_host->dev);
7142 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7144 list_del(&sdbg_devinfo->dev_list);
7145 kfree(sdbg_devinfo->zstate);
7146 kfree(sdbg_devinfo);
7149 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7153 static int sdebug_do_add_host(bool mk_new_store)
7155 int ph_idx = sdeb_most_recent_idx;
7158 ph_idx = sdebug_add_store();
7162 return sdebug_add_host_helper(ph_idx);
7165 static void sdebug_do_remove_host(bool the_end)
7168 struct sdebug_host_info *sdbg_host = NULL;
7169 struct sdebug_host_info *sdbg_host2;
7171 spin_lock(&sdebug_host_list_lock);
7172 if (!list_empty(&sdebug_host_list)) {
7173 sdbg_host = list_entry(sdebug_host_list.prev,
7174 struct sdebug_host_info, host_list);
7175 idx = sdbg_host->si_idx;
7177 if (!the_end && idx >= 0) {
7180 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7181 if (sdbg_host2 == sdbg_host)
7183 if (idx == sdbg_host2->si_idx) {
7189 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7190 if (idx == sdeb_most_recent_idx)
7191 --sdeb_most_recent_idx;
7195 list_del(&sdbg_host->host_list);
7196 spin_unlock(&sdebug_host_list_lock);
7201 device_unregister(&sdbg_host->dev);
7205 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7208 struct sdebug_dev_info *devip;
7210 block_unblock_all_queues(true);
7211 devip = (struct sdebug_dev_info *)sdev->hostdata;
7212 if (NULL == devip) {
7213 block_unblock_all_queues(false);
7216 num_in_q = atomic_read(&devip->num_in_q);
7218 if (qdepth > SDEBUG_CANQUEUE) {
7219 qdepth = SDEBUG_CANQUEUE;
7220 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7221 qdepth, SDEBUG_CANQUEUE);
7225 if (qdepth != sdev->queue_depth)
7226 scsi_change_queue_depth(sdev, qdepth);
7228 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7229 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7230 __func__, qdepth, num_in_q);
7232 block_unblock_all_queues(false);
7233 return sdev->queue_depth;
7236 static bool fake_timeout(struct scsi_cmnd *scp)
7238 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7239 if (sdebug_every_nth < -1)
7240 sdebug_every_nth = -1;
7241 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7242 return true; /* ignore command causing timeout */
7243 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7244 scsi_medium_access_command(scp))
7245 return true; /* time out reads and writes */
7250 /* Response to TUR or media access command when device stopped */
7251 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7255 ktime_t now_ts = ktime_get_boottime();
7256 struct scsi_device *sdp = scp->device;
7258 stopped_state = atomic_read(&devip->stopped);
7259 if (stopped_state == 2) {
7260 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7261 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7262 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7263 /* tur_ms_to_ready timer extinguished */
7264 atomic_set(&devip->stopped, 0);
7268 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7270 sdev_printk(KERN_INFO, sdp,
7271 "%s: Not ready: in process of becoming ready\n", my_name);
7272 if (scp->cmnd[0] == TEST_UNIT_READY) {
7273 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7275 if (diff_ns <= tur_nanosecs_to_ready)
7276 diff_ns = tur_nanosecs_to_ready - diff_ns;
7278 diff_ns = tur_nanosecs_to_ready;
7279 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7280 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
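/* e.g. (values hypothetical) tur_ms_to_ready=2000 with 500 ms already
 * elapsed puts 1500 (milliseconds until ready) in the INFORMATION field */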
7281 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7283 return check_condition_result;
7286 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7288 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7290 return check_condition_result;
7293 static int sdebug_map_queues(struct Scsi_Host *shost)
7297 if (shost->nr_hw_queues == 1)
7300 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7301 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7305 if (i == HCTX_TYPE_DEFAULT)
7306 map->nr_queues = submit_queues - poll_queues;
7307 else if (i == HCTX_TYPE_POLL)
7308 map->nr_queues = poll_queues;
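/* e.g. (values hypothetical) submit_queues=4 and poll_queues=1 give
 * 3 HCTX_TYPE_DEFAULT queues and 1 HCTX_TYPE_POLL queue */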
7310 if (!map->nr_queues) {
7311 BUG_ON(i == HCTX_TYPE_DEFAULT);
7315 map->queue_offset = qoff;
7316 blk_mq_map_queues(map);
7318 qoff += map->nr_queues;
7325 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7328 bool retiring = false;
7329 int num_entries = 0;
7330 unsigned int qc_idx = 0;
7331 unsigned long iflags;
7332 ktime_t kt_from_boot = ktime_get_boottime();
7333 struct sdebug_queue *sqp;
7334 struct sdebug_queued_cmd *sqcp;
7335 struct scsi_cmnd *scp;
7336 struct sdebug_dev_info *devip;
7337 struct sdebug_defer *sd_dp;
7339 sqp = sdebug_q_arr + queue_num;
7340 spin_lock_irqsave(&sqp->qc_lock, iflags);
7342 for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7344 qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7347 qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7349 if (unlikely(qc_idx >= sdebug_max_queue))
7352 sqcp = &sqp->qc_arr[qc_idx];
7353 sd_dp = sqcp->sd_dp;
7354 if (unlikely(!sd_dp))
7357 if (unlikely(scp == NULL)) {
7358 pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7359 queue_num, qc_idx, __func__);
7362 if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7363 if (kt_from_boot < sd_dp->cmpl_ts)
7366 } else /* ignoring non REQ_HIPRI requests */
7368 devip = (struct sdebug_dev_info *)scp->device->hostdata;
7370 atomic_dec(&devip->num_in_q);
7372 pr_err("devip=NULL from %s\n", __func__);
7373 if (unlikely(atomic_read(&retired_max_queue) > 0))
7376 sqcp->a_cmnd = NULL;
7377 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7378 pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7379 sqp, queue_num, qc_idx, __func__);
7382 if (unlikely(retiring)) { /* user has reduced max_queue */
7385 retval = atomic_read(&retired_max_queue);
7386 if (qc_idx >= retval) {
7387 pr_err("index %d too large\n", retval);
7390 k = find_last_bit(sqp->in_use_bm, retval);
7391 if ((k < sdebug_max_queue) || (k == retval))
7392 atomic_set(&retired_max_queue, 0);
7394 atomic_set(&retired_max_queue, k + 1);
7396 sd_dp->defer_t = SDEB_DEFER_NONE;
7397 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7398 scp->scsi_done(scp); /* callback to mid level */
7399 spin_lock_irqsave(&sqp->qc_lock, iflags);
7402 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7403 if (num_entries > 0)
7404 atomic_add(num_entries, &sdeb_mq_poll_count);
7408 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7409 struct scsi_cmnd *scp)
7412 struct scsi_device *sdp = scp->device;
7413 const struct opcode_info_t *oip;
7414 const struct opcode_info_t *r_oip;
7415 struct sdebug_dev_info *devip;
7416 u8 *cmd = scp->cmnd;
7417 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7418 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7421 u64 lun_index = sdp->lun & 0x3FFF;
7428 scsi_set_resid(scp, 0);
7429 if (sdebug_statistics) {
7430 atomic_inc(&sdebug_cmnd_count);
7431 inject_now = inject_on_this_cmd();
7435 if (unlikely(sdebug_verbose &&
7436 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7441 sb = (int)sizeof(b);
7443 strcpy(b, "too long, over 32 bytes");
7445 for (k = 0, n = 0; k < len && n < sb; ++k)
7446 n += scnprintf(b + n, sb - n, "%02x ",
7449 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7450 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7452 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7453 return SCSI_MLQUEUE_HOST_BUSY;
7454 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7455 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7458 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7459 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7460 devip = (struct sdebug_dev_info *)sdp->hostdata;
7461 if (unlikely(!devip)) {
7462 devip = find_build_dev_info(sdp);
7466 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7467 atomic_set(&sdeb_inject_pending, 1);
7469 na = oip->num_attached;
7471 if (na) { /* multiple commands with this opcode */
7473 if (FF_SA & r_oip->flags) {
7474 if (F_SA_LOW & oip->flags)
7477 sa = get_unaligned_be16(cmd + 8);
7478 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7479 if (opcode == oip->opcode && sa == oip->sa)
7482 } else { /* since no service action only check opcode */
7483 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7484 if (opcode == oip->opcode)
7489 if (F_SA_LOW & r_oip->flags)
7490 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7491 else if (F_SA_HIGH & r_oip->flags)
7492 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7494 mk_sense_invalid_opcode(scp);
7497 } /* else (when na==0) we assume the oip is a match */
7499 if (unlikely(F_INV_OP & flags)) {
7500 mk_sense_invalid_opcode(scp);
7503 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7505 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7506 my_name, opcode, " supported for wlun");
7507 mk_sense_invalid_opcode(scp);
7510 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7514 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7515 rem = ~oip->len_mask[k] & cmd[k];
7517 for (j = 7; j >= 0; --j, rem <<= 1) {
7521 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7526 if (unlikely(!(F_SKIP_UA & flags) &&
7527 find_first_bit(devip->uas_bm,
7528 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7529 errsts = make_ua(scp, devip);
7533 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7534 atomic_read(&devip->stopped))) {
7535 errsts = resp_not_ready(scp, devip);
7539 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7541 if (unlikely(sdebug_every_nth)) {
7542 if (fake_timeout(scp))
7543 return 0; /* ignore command: make trouble */
7545 if (likely(oip->pfp))
7546 pfp = oip->pfp; /* calls a resp_* function */
7548 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7551 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7552 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7553 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7554 sdebug_ndelay > 10000)) {
7556 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7557 * for Start Stop Unit (SSU) want at least 1 second delay and
7558 * if sdebug_jdelay>1 want a long delay of that many seconds.
7559 * For Synchronize Cache want 1/20 of SSU's delay.
7561 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7562 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7564 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
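/* e.g. (values hypothetical) HZ=1000, USER_HZ=100, sdebug_jdelay=5:
 * an SSU gets mult_frac(100 * 5, 1000, 1 * 100) = 5000 jiffies (5 s),
 * a SYNCHRONIZE CACHE (denom 20) gets 250 jiffies (0.25 s) */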
7565 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7567 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7570 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7572 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7575 static struct scsi_host_template sdebug_driver_template = {
7576 .show_info = scsi_debug_show_info,
7577 .write_info = scsi_debug_write_info,
7578 .proc_name = sdebug_proc_name,
7579 .name = "SCSI DEBUG",
7580 .info = scsi_debug_info,
7581 .slave_alloc = scsi_debug_slave_alloc,
7582 .slave_configure = scsi_debug_slave_configure,
7583 .slave_destroy = scsi_debug_slave_destroy,
7584 .ioctl = scsi_debug_ioctl,
7585 .queuecommand = scsi_debug_queuecommand,
7586 .change_queue_depth = sdebug_change_qdepth,
7587 .map_queues = sdebug_map_queues,
7588 .mq_poll = sdebug_blk_mq_poll,
7589 .eh_abort_handler = scsi_debug_abort,
7590 .eh_device_reset_handler = scsi_debug_device_reset,
7591 .eh_target_reset_handler = scsi_debug_target_reset,
7592 .eh_bus_reset_handler = scsi_debug_bus_reset,
7593 .eh_host_reset_handler = scsi_debug_host_reset,
7594 .can_queue = SDEBUG_CANQUEUE,
7596 .sg_tablesize = SG_MAX_SEGMENTS,
7597 .cmd_per_lun = DEF_CMD_PER_LUN,
7599 .max_segment_size = -1U,
7600 .module = THIS_MODULE,
7601 .track_queue_depth = 1,
7604 static int sdebug_driver_probe(struct device *dev)
7607 struct sdebug_host_info *sdbg_host;
7608 struct Scsi_Host *hpnt;
7611 sdbg_host = to_sdebug_host(dev);
7613 sdebug_driver_template.can_queue = sdebug_max_queue;
7614 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7615 if (!sdebug_clustering)
7616 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7618 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7620 pr_err("scsi_host_alloc failed\n");
7624 if (submit_queues > nr_cpu_ids) {
7625 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7626 my_name, submit_queues, nr_cpu_ids);
7627 submit_queues = nr_cpu_ids;
7630 * Decide whether to tell scsi subsystem that we want mq. The
7631 * following should give the same answer for each host.
7633 hpnt->nr_hw_queues = submit_queues;
7634 if (sdebug_host_max_queue)
7635 hpnt->host_tagset = 1;
7637 /* poll queues are possible for nr_hw_queues > 1 */
7638 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7639 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7640 my_name, poll_queues, hpnt->nr_hw_queues);
7645 * Poll queues don't need interrupts, but we need at least one I/O queue
7646 * left over for non-polled I/O.
7647 * If that condition is not met, trim poll_queues to 1 (just for simplicity).
7649 if (poll_queues >= submit_queues) {
7650 if (submit_queues < 3)
7651 pr_warn("%s: trim poll_queues to 1\n", my_name);
7653 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7654 my_name, submit_queues - 1);
7660 sdbg_host->shost = hpnt;
7661 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7662 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7663 hpnt->max_id = sdebug_num_tgts + 1;
7665 hpnt->max_id = sdebug_num_tgts;
7666 /* = sdebug_max_luns; */
7667 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7671 switch (sdebug_dif) {
7673 case T10_PI_TYPE1_PROTECTION:
7674 hprot = SHOST_DIF_TYPE1_PROTECTION;
7676 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7679 case T10_PI_TYPE2_PROTECTION:
7680 hprot = SHOST_DIF_TYPE2_PROTECTION;
7682 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7685 case T10_PI_TYPE3_PROTECTION:
7686 hprot = SHOST_DIF_TYPE3_PROTECTION;
7688 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7693 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7697 scsi_host_set_prot(hpnt, hprot);
7699 if (have_dif_prot || sdebug_dix)
7700 pr_info("host protection%s%s%s%s%s%s%s\n",
7701 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7702 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7703 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7704 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7705 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7706 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7707 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7709 if (sdebug_guard == 1)
7710 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7712 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7714 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7715 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7716 if (sdebug_every_nth) /* need stats counters for every_nth */
7717 sdebug_statistics = true;
7718 error = scsi_add_host(hpnt, &sdbg_host->dev);
7720 pr_err("scsi_add_host failed\n");
7722 scsi_host_put(hpnt);
7724 scsi_scan_host(hpnt);
7730 static void sdebug_driver_remove(struct device *dev)
7732 struct sdebug_host_info *sdbg_host;
7733 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7735 sdbg_host = to_sdebug_host(dev);
7737 scsi_remove_host(sdbg_host->shost);
7739 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7741 list_del(&sdbg_devinfo->dev_list);
7742 kfree(sdbg_devinfo->zstate);
7743 kfree(sdbg_devinfo);
7746 scsi_host_put(sdbg_host->shost);
7749 static int pseudo_lld_bus_match(struct device *dev,
7750 struct device_driver *dev_driver)
7755 static struct bus_type pseudo_lld_bus = {
7757 .match = pseudo_lld_bus_match,
7758 .probe = sdebug_driver_probe,
7759 .remove = sdebug_driver_remove,
7760 .drv_groups = sdebug_drv_groups,